2024-11-07 12:27:42,071 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-07 12:27:42,083 main DEBUG Took 0.009684 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-07 12:27:42,083 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-07 12:27:42,083 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-07 12:27:42,084 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-07 12:27:42,085 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 12:27:42,092 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-07 12:27:42,103 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:27:42,104 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 12:27:42,105 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:27:42,105 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 12:27:42,106 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:27:42,106 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 12:27:42,106 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:27:42,107 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 12:27:42,107 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:27:42,107 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 12:27:42,108 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:27:42,108 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 12:27:42,109 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:27:42,109 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-07 12:27:42,109 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:27:42,110 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 12:27:42,110 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:27:42,110 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 12:27:42,111 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:27:42,111 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 12:27:42,111 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:27:42,112 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 12:27:42,112 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:27:42,112 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 12:27:42,113 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:27:42,113 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-07 12:27:42,114 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:27:42,115 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-07 12:27:42,117 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-07 12:27:42,117 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-07 12:27:42,118 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-07 12:27:42,118 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-07 12:27:42,127 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-07 12:27:42,129 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-07 12:27:42,130 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-07 12:27:42,131 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-07 12:27:42,131 main DEBUG createAppenders(={Console}) 2024-11-07 12:27:42,132 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-07 12:27:42,132 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-07 12:27:42,132 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-07 12:27:42,133 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-07 12:27:42,133 main DEBUG OutputStream closed 2024-11-07 12:27:42,133 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-07 12:27:42,133 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-07 12:27:42,134 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-07 12:27:42,203 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-07 12:27:42,205 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-07 12:27:42,206 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-07 12:27:42,207 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-07 12:27:42,207 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-07 12:27:42,208 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-07 12:27:42,208 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-07 12:27:42,208 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-07 12:27:42,208 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-07 12:27:42,209 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-07 12:27:42,209 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-07 12:27:42,209 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-07 12:27:42,210 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-07 12:27:42,210 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-07 12:27:42,210 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-07 12:27:42,210 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-07 12:27:42,211 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-07 12:27:42,211 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-07 12:27:42,214 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-07 12:27:42,214 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-07 12:27:42,214 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-07 12:27:42,215 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-07T12:27:42,450 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab 2024-11-07 12:27:42,453 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-07 12:27:42,453 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-07T12:27:42,462 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-07T12:27:42,499 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=161, ProcessCount=11, AvailableMemoryMB=7069 2024-11-07T12:27:42,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-07T12:27:42,518 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/cluster_13874df7-057f-48fa-b1e4-f00caab0ce89, deleteOnExit=true 2024-11-07T12:27:42,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-07T12:27:42,519 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/test.cache.data in system properties and HBase conf 2024-11-07T12:27:42,520 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/hadoop.tmp.dir in system properties and HBase conf 2024-11-07T12:27:42,520 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/hadoop.log.dir in system properties and HBase conf 2024-11-07T12:27:42,521 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-07T12:27:42,521 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-07T12:27:42,521 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-07T12:27:42,613 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-07T12:27:42,699 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-07T12:27:42,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:27:42,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:27:42,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-07T12:27:42,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:27:42,705 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-07T12:27:42,705 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-07T12:27:42,705 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:27:42,706 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:27:42,706 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-07T12:27:42,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/nfs.dump.dir in system properties and HBase conf 2024-11-07T12:27:42,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/java.io.tmpdir in system properties and HBase conf 2024-11-07T12:27:42,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:27:42,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-07T12:27:42,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-07T12:27:43,172 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:27:43,495 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-07T12:27:43,571 INFO [Time-limited test {}] log.Log(170): Logging initialized @2242ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-07T12:27:43,644 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:27:43,703 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:27:43,723 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:27:43,723 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:27:43,724 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:27:43,737 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:27:43,740 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:27:43,741 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:27:43,950 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/java.io.tmpdir/jetty-localhost-33751-hadoop-hdfs-3_4_1-tests_jar-_-any-10460241385279144455/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:27:43,959 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:33751} 2024-11-07T12:27:43,959 INFO [Time-limited test {}] server.Server(415): Started @2632ms 2024-11-07T12:27:43,989 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:27:44,337 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:27:44,346 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:27:44,347 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:27:44,348 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:27:44,348 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:27:44,350 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:27:44,351 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:27:44,494 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/java.io.tmpdir/jetty-localhost-39873-hadoop-hdfs-3_4_1-tests_jar-_-any-1792376286665726566/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:27:44,494 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:39873} 2024-11-07T12:27:44,495 INFO [Time-limited test {}] server.Server(415): Started @3167ms 2024-11-07T12:27:44,557 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:27:44,686 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:27:44,693 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:27:44,694 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:27:44,695 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:27:44,695 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:27:44,697 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:27:44,698 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:27:44,835 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/java.io.tmpdir/jetty-localhost-39049-hadoop-hdfs-3_4_1-tests_jar-_-any-2529964330357722173/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:27:44,835 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:39049} 2024-11-07T12:27:44,836 INFO [Time-limited test {}] server.Server(415): Started @3508ms 2024-11-07T12:27:44,838 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-07T12:27:44,995 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/cluster_13874df7-057f-48fa-b1e4-f00caab0ce89/data/data1/current/BP-1757488485-172.17.0.3-1730982463262/current, will proceed with Du for space computation calculation, 2024-11-07T12:27:44,995 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/cluster_13874df7-057f-48fa-b1e4-f00caab0ce89/data/data3/current/BP-1757488485-172.17.0.3-1730982463262/current, will proceed with Du for space computation calculation, 2024-11-07T12:27:44,995 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/cluster_13874df7-057f-48fa-b1e4-f00caab0ce89/data/data4/current/BP-1757488485-172.17.0.3-1730982463262/current, will proceed with Du for space computation calculation, 2024-11-07T12:27:44,995 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/cluster_13874df7-057f-48fa-b1e4-f00caab0ce89/data/data2/current/BP-1757488485-172.17.0.3-1730982463262/current, will proceed with Du for space computation calculation, 2024-11-07T12:27:45,047 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-07T12:27:45,047 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:27:45,140 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x50923cd1ee6f2814 with lease ID 0xfd2281eea7b68f09: Processing first storage report for DS-a6283247-7f49-4600-89dc-44c0cfcd0a12 from datanode DatanodeRegistration(127.0.0.1:37581, datanodeUuid=9a3704d1-8ad0-4329-9ca2-c2c3274aabf5, infoPort=44959, infoSecurePort=0, ipcPort=41873, storageInfo=lv=-57;cid=testClusterID;nsid=824020713;c=1730982463262) 2024-11-07T12:27:45,141 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x50923cd1ee6f2814 with lease ID 0xfd2281eea7b68f09: from storage DS-a6283247-7f49-4600-89dc-44c0cfcd0a12 node DatanodeRegistration(127.0.0.1:37581, datanodeUuid=9a3704d1-8ad0-4329-9ca2-c2c3274aabf5, infoPort=44959, infoSecurePort=0, ipcPort=41873, storageInfo=lv=-57;cid=testClusterID;nsid=824020713;c=1730982463262), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-07T12:27:45,142 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x98d863d3a6ed0e14 with lease ID 0xfd2281eea7b68f0a: Processing first storage report for DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7 from datanode DatanodeRegistration(127.0.0.1:42917, datanodeUuid=82765e69-58f6-4989-82a4-6fc772a0fd26, infoPort=38661, infoSecurePort=0, ipcPort=37045, storageInfo=lv=-57;cid=testClusterID;nsid=824020713;c=1730982463262) 2024-11-07T12:27:45,142 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x98d863d3a6ed0e14 with lease ID 0xfd2281eea7b68f0a: from storage DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7 node DatanodeRegistration(127.0.0.1:42917, datanodeUuid=82765e69-58f6-4989-82a4-6fc772a0fd26, infoPort=38661, infoSecurePort=0, ipcPort=37045, storageInfo=lv=-57;cid=testClusterID;nsid=824020713;c=1730982463262), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:27:45,143 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x50923cd1ee6f2814 with lease ID 0xfd2281eea7b68f09: Processing first storage report for DS-f2c1a5b6-37b5-4ce3-889f-5325660f366c from datanode DatanodeRegistration(127.0.0.1:37581, datanodeUuid=9a3704d1-8ad0-4329-9ca2-c2c3274aabf5, infoPort=44959, infoSecurePort=0, ipcPort=41873, storageInfo=lv=-57;cid=testClusterID;nsid=824020713;c=1730982463262) 2024-11-07T12:27:45,143 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x50923cd1ee6f2814 with lease ID 0xfd2281eea7b68f09: from storage DS-f2c1a5b6-37b5-4ce3-889f-5325660f366c node DatanodeRegistration(127.0.0.1:37581, datanodeUuid=9a3704d1-8ad0-4329-9ca2-c2c3274aabf5, infoPort=44959, infoSecurePort=0, ipcPort=41873, storageInfo=lv=-57;cid=testClusterID;nsid=824020713;c=1730982463262), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-07T12:27:45,143 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x98d863d3a6ed0e14 with lease ID 0xfd2281eea7b68f0a: Processing first storage report for DS-414c69ff-3dbc-42b1-b492-56c214e41ad9 from datanode DatanodeRegistration(127.0.0.1:42917, datanodeUuid=82765e69-58f6-4989-82a4-6fc772a0fd26, infoPort=38661, infoSecurePort=0, ipcPort=37045, storageInfo=lv=-57;cid=testClusterID;nsid=824020713;c=1730982463262) 2024-11-07T12:27:45,144 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x98d863d3a6ed0e14 with lease ID 0xfd2281eea7b68f0a: from storage DS-414c69ff-3dbc-42b1-b492-56c214e41ad9 node DatanodeRegistration(127.0.0.1:42917, datanodeUuid=82765e69-58f6-4989-82a4-6fc772a0fd26, infoPort=38661, infoSecurePort=0, ipcPort=37045, storageInfo=lv=-57;cid=testClusterID;nsid=824020713;c=1730982463262), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:27:45,222 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab 2024-11-07T12:27:45,297 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/cluster_13874df7-057f-48fa-b1e4-f00caab0ce89/zookeeper_0, clientPort=49916, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/cluster_13874df7-057f-48fa-b1e4-f00caab0ce89/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/cluster_13874df7-057f-48fa-b1e4-f00caab0ce89/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-07T12:27:45,307 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49916 2024-11-07T12:27:45,322 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:27:45,326 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:27:45,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:27:45,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:27:45,959 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865 with version=8 2024-11-07T12:27:45,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/hbase-staging 2024-11-07T12:27:46,064 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-07T12:27:46,310 INFO [Time-limited test {}] client.ConnectionUtils(128): master/87fb68174c52:0 server-side Connection retries=45 2024-11-07T12:27:46,321 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:27:46,321 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:27:46,325 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:27:46,325 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:27:46,326 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:27:46,462 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-07T12:27:46,521 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-07T12:27:46,529 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-07T12:27:46,533 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:27:46,560 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 5080 (auto-detected) 2024-11-07T12:27:46,561 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-11-07T12:27:46,580 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34159 2024-11-07T12:27:46,601 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34159 connecting to ZooKeeper ensemble=127.0.0.1:49916 2024-11-07T12:27:46,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:341590x0, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:27:46,638 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34159-0x1001a36599d0000 connected 2024-11-07T12:27:46,665 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:27:46,668 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:27:46,677 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:27:46,681 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865, hbase.cluster.distributed=false 2024-11-07T12:27:46,705 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:27:46,709 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34159 2024-11-07T12:27:46,709 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34159 2024-11-07T12:27:46,713 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34159 2024-11-07T12:27:46,713 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34159 2024-11-07T12:27:46,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34159 2024-11-07T12:27:46,823 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/87fb68174c52:0 server-side Connection retries=45 2024-11-07T12:27:46,825 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:27:46,825 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:27:46,825 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:27:46,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:27:46,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:27:46,829 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T12:27:46,831 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:27:46,832 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:43727 2024-11-07T12:27:46,834 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43727 connecting to ZooKeeper ensemble=127.0.0.1:49916 2024-11-07T12:27:46,835 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:27:46,839 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:27:46,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:437270x0, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:27:46,847 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43727-0x1001a36599d0001 connected 2024-11-07T12:27:46,848 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:43727-0x1001a36599d0001, quorum=127.0.0.1:49916, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:27:46,852 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-07T12:27:46,859 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-07T12:27:46,861 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43727-0x1001a36599d0001, quorum=127.0.0.1:49916, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T12:27:46,866 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43727-0x1001a36599d0001, quorum=127.0.0.1:49916, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:27:46,867 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43727 2024-11-07T12:27:46,867 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43727 2024-11-07T12:27:46,868 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43727 2024-11-07T12:27:46,868 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43727 2024-11-07T12:27:46,869 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43727 2024-11-07T12:27:46,884 DEBUG [M:0;87fb68174c52:34159 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;87fb68174c52:34159 2024-11-07T12:27:46,885 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/87fb68174c52,34159,1730982466116 2024-11-07T12:27:46,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43727-0x1001a36599d0001, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:27:46,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:27:46,894 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/87fb68174c52,34159,1730982466116 2024-11-07T12:27:46,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43727-0x1001a36599d0001, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T12:27:46,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:27:46,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43727-0x1001a36599d0001, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-07T12:27:46,915 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T12:27:46,917 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/87fb68174c52,34159,1730982466116 from backup master directory 2024-11-07T12:27:46,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/87fb68174c52,34159,1730982466116 2024-11-07T12:27:46,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43727-0x1001a36599d0001, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:27:46,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:27:46,921 WARN [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-07T12:27:46,921 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=87fb68174c52,34159,1730982466116 2024-11-07T12:27:46,923 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-07T12:27:46,924 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-07T12:27:46,978 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/hbase.id] with ID: 54453467-f894-4cf6-9004-0469685a67cb 2024-11-07T12:27:46,978 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/.tmp/hbase.id 2024-11-07T12:27:46,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:27:46,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:27:46,994 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/.tmp/hbase.id]:[hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/hbase.id] 2024-11-07T12:27:47,036 INFO [master/87fb68174c52:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:27:47,041 INFO 
[master/87fb68174c52:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-07T12:27:47,060 INFO [master/87fb68174c52:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-11-07T12:27:47,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43727-0x1001a36599d0001, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:27:47,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:27:47,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:27:47,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:27:47,096 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T12:27:47,099 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-07T12:27:47,105 INFO [master/87fb68174c52:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:27:47,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:27:47,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:27:47,155 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store 2024-11-07T12:27:47,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:27:47,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:27:47,180 INFO [master/87fb68174c52:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-07T12:27:47,183 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:27:47,184 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:27:47,184 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:27:47,184 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:27:47,186 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:27:47,186 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-07T12:27:47,186 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:27:47,187 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730982467184Disabling compacts and flushes for region at 1730982467184Disabling writes for close at 1730982467186 (+2 ms)Writing region close event to WAL at 1730982467186Closed at 1730982467186 2024-11-07T12:27:47,189 WARN [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store/.initializing 2024-11-07T12:27:47,189 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/WALs/87fb68174c52,34159,1730982466116 2024-11-07T12:27:47,214 INFO [master/87fb68174c52:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87fb68174c52%2C34159%2C1730982466116, suffix=, logDir=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/WALs/87fb68174c52,34159,1730982466116, archiveDir=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/oldWALs, maxLogs=10 2024-11-07T12:27:47,224 INFO [master/87fb68174c52:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C34159%2C1730982466116.1730982467220 2024-11-07T12:27:47,243 INFO [master/87fb68174c52:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/WALs/87fb68174c52,34159,1730982466116/87fb68174c52%2C34159%2C1730982466116.1730982467220 2024-11-07T12:27:47,251 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44959:44959),(127.0.0.1/127.0.0.1:38661:38661)] 2024-11-07T12:27:47,253 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:27:47,253 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:27:47,256 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:27:47,257 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:27:47,295 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:27:47,325 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-07T12:27:47,330 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:27:47,333 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:27:47,334 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:27:47,337 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-07T12:27:47,337 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:27:47,339 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:27:47,339 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:27:47,342 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-07T12:27:47,342 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:27:47,343 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:27:47,343 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:27:47,346 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-07T12:27:47,346 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:27:47,347 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:27:47,347 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:27:47,351 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:27:47,352 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:27:47,357 DEBUG [master/87fb68174c52:0:becomeActiveMaster 
{}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:27:47,358 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:27:47,361 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-07T12:27:47,365 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:27:47,369 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:27:47,370 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=698521, jitterRate=-0.11178575456142426}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-07T12:27:47,376 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1730982467269Initializing all the Stores at 1730982467271 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982467271Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982467272 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982467272Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982467272Cleaning up temporary data from old regions at 1730982467358 (+86 ms)Region opened successfully at 1730982467376 (+18 ms) 2024-11-07T12:27:47,378 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-07T12:27:47,416 DEBUG [master/87fb68174c52:0:becomeActiveMaster 
{}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c8dc0fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87fb68174c52/172.17.0.3:0 2024-11-07T12:27:47,449 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-07T12:27:47,461 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-07T12:27:47,461 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-07T12:27:47,464 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-07T12:27:47,466 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-07T12:27:47,471 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-07T12:27:47,471 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-07T12:27:47,497 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-07T12:27:47,506 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-07T12:27:47,508 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-07T12:27:47,510 INFO [master/87fb68174c52:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-07T12:27:47,512 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-07T12:27:47,513 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-07T12:27:47,516 INFO [master/87fb68174c52:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-07T12:27:47,519 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-07T12:27:47,521 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-07T12:27:47,522 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, 
baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-07T12:27:47,523 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-07T12:27:47,540 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-07T12:27:47,542 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-07T12:27:47,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43727-0x1001a36599d0001, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:27:47,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:27:47,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43727-0x1001a36599d0001, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:27:47,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:27:47,549 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=87fb68174c52,34159,1730982466116, sessionid=0x1001a36599d0000, setting cluster-up flag (Was=false) 2024-11-07T12:27:47,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43727-0x1001a36599d0001, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:27:47,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:27:47,567 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-07T12:27:47,569 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87fb68174c52,34159,1730982466116 2024-11-07T12:27:47,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43727-0x1001a36599d0001, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:27:47,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:27:47,581 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, 
/hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-07T12:27:47,583 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87fb68174c52,34159,1730982466116 2024-11-07T12:27:47,589 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-07T12:27:47,667 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-07T12:27:47,672 INFO [RS:0;87fb68174c52:43727 {}] regionserver.HRegionServer(746): ClusterId : 54453467-f894-4cf6-9004-0469685a67cb 2024-11-07T12:27:47,675 DEBUG [RS:0;87fb68174c52:43727 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-07T12:27:47,676 INFO [master/87fb68174c52:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-07T12:27:47,680 DEBUG [RS:0;87fb68174c52:43727 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-07T12:27:47,680 DEBUG [RS:0;87fb68174c52:43727 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-07T12:27:47,683 DEBUG [RS:0;87fb68174c52:43727 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-07T12:27:47,684 DEBUG [RS:0;87fb68174c52:43727 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ddbabe0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87fb68174c52/172.17.0.3:0 2024-11-07T12:27:47,685 INFO [master/87fb68174c52:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
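The repeated "Received ZooKeeper Event" lines come from HBase's ZKWatcher being notified about children of the /hbase base znode (for example, /hbase/running being created just below). The sketch that follows uses the raw Apache ZooKeeper client API rather than HBase's ZKWatcher to show what registering for and receiving such NodeChildrenChanged events looks like; the connect string matches the quorum in the log, but the code is illustrative only.

    import java.util.List;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class HbaseZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        // Quorum address and base znode taken from the log lines above.
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("Received ZooKeeper Event, type=" + event.getType()
                + ", state=" + event.getState() + ", path=" + event.getPath());

        ZooKeeper zk = new ZooKeeper("127.0.0.1:49916", 30_000, watcher);

        // Register a one-shot children watch on /hbase; a NodeChildrenChanged
        // event fires when a child such as /hbase/running is created or deleted.
        List<String> children = zk.getChildren("/hbase", true);
        System.out.println("children of /hbase: " + children);

        Thread.sleep(10_000);  // keep the session alive long enough to observe an event
        zk.close();
      }
    }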
2024-11-07T12:27:47,690 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 87fb68174c52,34159,1730982466116 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-07T12:27:47,697 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:27:47,697 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:27:47,697 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:27:47,698 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:27:47,698 DEBUG [RS:0;87fb68174c52:43727 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;87fb68174c52:43727 2024-11-07T12:27:47,698 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/87fb68174c52:0, corePoolSize=10, maxPoolSize=10 2024-11-07T12:27:47,698 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:27:47,698 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/87fb68174c52:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:27:47,698 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:27:47,700 INFO [RS:0;87fb68174c52:43727 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-07T12:27:47,701 INFO [RS:0;87fb68174c52:43727 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-07T12:27:47,701 DEBUG [RS:0;87fb68174c52:43727 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-07T12:27:47,702 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1730982497702 2024-11-07T12:27:47,703 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:27:47,703 INFO [RS:0;87fb68174c52:43727 {}] regionserver.HRegionServer(2659): reportForDuty to master=87fb68174c52,34159,1730982466116 with port=43727, startcode=1730982466783 2024-11-07T12:27:47,703 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-07T12:27:47,704 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-07T12:27:47,704 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-07T12:27:47,707 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-07T12:27:47,708 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-07T12:27:47,708 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-07T12:27:47,708 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-07T12:27:47,710 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:27:47,710 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-07T12:27:47,710 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T12:27:47,712 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-07T12:27:47,714 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-07T12:27:47,714 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-07T12:27:47,716 DEBUG [RS:0;87fb68174c52:43727 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-07T12:27:47,717 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-07T12:27:47,717 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-07T12:27:47,720 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.large.0-1730982467719,5,FailOnTimeoutGroup] 2024-11-07T12:27:47,722 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.small.0-1730982467721,5,FailOnTimeoutGroup] 2024-11-07T12:27:47,722 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T12:27:47,722 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-07T12:27:47,724 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-07T12:27:47,724 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
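Each "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" line corresponds to a periodic task scheduled on the master's ChoreService. A minimal sketch of that pattern, using HBase's ScheduledChore and ChoreService classes (internal, non-public API; constructor signatures assumed from current HBase sources and shown purely for illustration), might look like this:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        // Simple Stoppable the chore can consult to decide when to stop.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped = false;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };

        // Runs every 1000 ms, like the CompactionChecker chore in this log.
        ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore tick");
          }
        };

        ChoreService service = new ChoreService("example");
        service.scheduleChore(chore);

        Thread.sleep(5_000);
        stopper.stop("demo finished");
        service.shutdown();
      }
    }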
2024-11-07T12:27:47,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:27:47,727 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-07T12:27:47,727 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865 2024-11-07T12:27:47,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:27:47,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741832_1008 (size=32) 2024-11-07T12:27:47,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741832_1008 (size=32) 2024-11-07T12:27:47,741 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:27:47,743 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:27:47,746 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:27:47,746 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:27:47,747 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:27:47,747 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:27:47,749 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:27:47,749 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:27:47,750 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:27:47,751 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:27:47,753 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:27:47,753 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:27:47,754 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:27:47,754 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:27:47,756 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:27:47,756 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:27:47,757 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:27:47,757 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:27:47,758 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/hbase/meta/1588230740 2024-11-07T12:27:47,759 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/hbase/meta/1588230740 2024-11-07T12:27:47,763 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:27:47,764 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:27:47,764 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
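The FlushLargeStoresPolicy message just below notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, so the policy falls back to the memstore flush size divided by the number of families. Where an explicit per-family bound is wanted, it can be supplied as a table-level value; a sketch follows (the table name and byte sizes are illustrative, not taken from this test):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushPolicySketch {
      public static void main(String[] args) {
        long flushSize = 128L * 1024 * 1024;          // 128 MB region memstore flush size
        long perFamilyLowerBound = 16L * 1024 * 1024; // flush a family once it holds ~16 MB

        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_table"))           // hypothetical table
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setMemStoreFlushSize(flushSize)
            // Property name taken from the log message; the value is a byte count.
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(perFamilyLowerBound))
            .build();

        System.out.println(td);
      }
    }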
2024-11-07T12:27:47,767 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:27:47,770 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:27:47,771 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=733987, jitterRate=-0.06668771803379059}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:27:47,773 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1730982467741Initializing all the Stores at 1730982467743 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982467743Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982467743Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982467743Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982467743Cleaning up temporary data from old regions at 1730982467764 (+21 ms)Region opened successfully at 1730982467773 (+9 ms) 2024-11-07T12:27:47,774 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:27:47,774 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:27:47,774 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:27:47,774 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:27:47,774 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:27:47,775 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:27:47,775 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730982467774Disabling compacts and flushes for region at 1730982467774Disabling writes for close at 1730982467774Writing region 
close event to WAL at 1730982467775 (+1 ms)Closed at 1730982467775 2024-11-07T12:27:47,778 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:27:47,779 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-07T12:27:47,785 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-07T12:27:47,794 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:27:47,798 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-07T12:27:47,798 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46695, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-07T12:27:47,804 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34159 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 87fb68174c52,43727,1730982466783 2024-11-07T12:27:47,806 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34159 {}] master.ServerManager(517): Registering regionserver=87fb68174c52,43727,1730982466783 2024-11-07T12:27:47,821 DEBUG [RS:0;87fb68174c52:43727 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865 2024-11-07T12:27:47,821 DEBUG [RS:0;87fb68174c52:43727 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35819 2024-11-07T12:27:47,821 DEBUG [RS:0;87fb68174c52:43727 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-07T12:27:47,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:27:47,826 DEBUG [RS:0;87fb68174c52:43727 {}] zookeeper.ZKUtil(111): regionserver:43727-0x1001a36599d0001, quorum=127.0.0.1:49916, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/87fb68174c52,43727,1730982466783 2024-11-07T12:27:47,826 WARN [RS:0;87fb68174c52:43727 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
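The "Opened 1588230740; ... SteppingSplitPolicy ... ConstantSizeRegionSplitPolicy{desiredMaxFileSize=...}" line above reflects the region split policy and size threshold chosen for the region. Both are configurable; the sketch below sets them on a Configuration object, assuming the stock hbase.regionserver.region.split.policy and hbase.hregion.max.filesize keys (the values shown are illustrative, not what this test uses):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SplitPolicyConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Use a fixed-size split policy instead of the default stepping policy.
        conf.set("hbase.regionserver.region.split.policy",
            "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy");

        // Split a region once a store grows past ~10 GB (illustrative value).
        conf.setLong("hbase.hregion.max.filesize", 10L * 1024 * 1024 * 1024);

        System.out.println(conf.get("hbase.regionserver.region.split.policy"));
      }
    }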
2024-11-07T12:27:47,826 INFO [RS:0;87fb68174c52:43727 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:27:47,827 DEBUG [RS:0;87fb68174c52:43727 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783 2024-11-07T12:27:47,828 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [87fb68174c52,43727,1730982466783] 2024-11-07T12:27:47,852 INFO [RS:0;87fb68174c52:43727 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-07T12:27:47,866 INFO [RS:0;87fb68174c52:43727 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-07T12:27:47,871 INFO [RS:0;87fb68174c52:43727 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-07T12:27:47,871 INFO [RS:0;87fb68174c52:43727 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:27:47,872 INFO [RS:0;87fb68174c52:43727 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-07T12:27:47,878 INFO [RS:0;87fb68174c52:43727 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-07T12:27:47,879 INFO [RS:0;87fb68174c52:43727 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-07T12:27:47,879 DEBUG [RS:0;87fb68174c52:43727 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:27:47,880 DEBUG [RS:0;87fb68174c52:43727 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:27:47,880 DEBUG [RS:0;87fb68174c52:43727 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:27:47,880 DEBUG [RS:0;87fb68174c52:43727 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:27:47,880 DEBUG [RS:0;87fb68174c52:43727 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:27:47,880 DEBUG [RS:0;87fb68174c52:43727 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/87fb68174c52:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:27:47,880 DEBUG [RS:0;87fb68174c52:43727 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:27:47,880 DEBUG [RS:0;87fb68174c52:43727 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:27:47,881 DEBUG [RS:0;87fb68174c52:43727 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/87fb68174c52:0, corePoolSize=1, 
maxPoolSize=1 2024-11-07T12:27:47,881 DEBUG [RS:0;87fb68174c52:43727 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:27:47,881 DEBUG [RS:0;87fb68174c52:43727 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:27:47,881 DEBUG [RS:0;87fb68174c52:43727 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:27:47,881 DEBUG [RS:0;87fb68174c52:43727 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/87fb68174c52:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:27:47,881 DEBUG [RS:0;87fb68174c52:43727 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:27:47,882 INFO [RS:0;87fb68174c52:43727 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:27:47,882 INFO [RS:0;87fb68174c52:43727 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:27:47,883 INFO [RS:0;87fb68174c52:43727 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:27:47,883 INFO [RS:0;87fb68174c52:43727 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-07T12:27:47,883 INFO [RS:0;87fb68174c52:43727 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-07T12:27:47,883 INFO [RS:0;87fb68174c52:43727 {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,43727,1730982466783-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:27:47,902 INFO [RS:0;87fb68174c52:43727 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-07T12:27:47,903 INFO [RS:0;87fb68174c52:43727 {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,43727,1730982466783-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:27:47,904 INFO [RS:0;87fb68174c52:43727 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:27:47,904 INFO [RS:0;87fb68174c52:43727 {}] regionserver.Replication(171): 87fb68174c52,43727,1730982466783 started 2024-11-07T12:27:47,922 INFO [RS:0;87fb68174c52:43727 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
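The MemStoreFlusher line above reports a global memstore limit of 880 M with a low-water mark of 836 M, and PressureAwareCompactionThroughputController reports 50-100 MB/s compaction bounds. These figures are derived from heap-fraction and throughput settings; a sketch of the commonly used keys follows (key names assumed from stock HBase defaults, values illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegionServerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Fraction of the region server heap usable by all memstores,
        // and the lower-mark fraction at which forced flushing backs off.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);

        // Throughput bounds used by PressureAwareCompactionThroughputController (bytes/sec).
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);

        System.out.println(conf.get("hbase.regionserver.global.memstore.size"));
      }
    }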
2024-11-07T12:27:47,922 INFO [RS:0;87fb68174c52:43727 {}] regionserver.HRegionServer(1482): Serving as 87fb68174c52,43727,1730982466783, RpcServer on 87fb68174c52/172.17.0.3:43727, sessionid=0x1001a36599d0001 2024-11-07T12:27:47,923 DEBUG [RS:0;87fb68174c52:43727 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-07T12:27:47,923 DEBUG [RS:0;87fb68174c52:43727 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 87fb68174c52,43727,1730982466783 2024-11-07T12:27:47,923 DEBUG [RS:0;87fb68174c52:43727 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87fb68174c52,43727,1730982466783' 2024-11-07T12:27:47,923 DEBUG [RS:0;87fb68174c52:43727 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-07T12:27:47,924 DEBUG [RS:0;87fb68174c52:43727 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-07T12:27:47,925 DEBUG [RS:0;87fb68174c52:43727 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-07T12:27:47,925 DEBUG [RS:0;87fb68174c52:43727 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-07T12:27:47,925 DEBUG [RS:0;87fb68174c52:43727 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 87fb68174c52,43727,1730982466783 2024-11-07T12:27:47,926 DEBUG [RS:0;87fb68174c52:43727 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87fb68174c52,43727,1730982466783' 2024-11-07T12:27:47,926 DEBUG [RS:0;87fb68174c52:43727 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-07T12:27:47,926 DEBUG [RS:0;87fb68174c52:43727 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-07T12:27:47,927 DEBUG [RS:0;87fb68174c52:43727 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-07T12:27:47,927 INFO [RS:0;87fb68174c52:43727 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-07T12:27:47,927 INFO [RS:0;87fb68174c52:43727 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-07T12:27:47,949 WARN [87fb68174c52:34159 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
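The two "Quota support disabled" lines above indicate the cluster runs without RPC or space quotas, which is the default. Enabling them is a single boolean switch; the key name below is assumed to be the stock hbase.quota.enabled setting, left at false in this test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Turns on the RegionServerRpcQuotaManager / space quota machinery at startup.
        conf.setBoolean("hbase.quota.enabled", true);
        System.out.println("quotas enabled: " + conf.getBoolean("hbase.quota.enabled", false));
      }
    }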
2024-11-07T12:27:48,036 INFO [RS:0;87fb68174c52:43727 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87fb68174c52%2C43727%2C1730982466783, suffix=, logDir=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783, archiveDir=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/oldWALs, maxLogs=32 2024-11-07T12:27:48,038 INFO [RS:0;87fb68174c52:43727 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C43727%2C1730982466783.1730982468038 2024-11-07T12:27:48,048 INFO [RS:0;87fb68174c52:43727 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982468038 2024-11-07T12:27:48,053 DEBUG [RS:0;87fb68174c52:43727 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44959:44959),(127.0.0.1/127.0.0.1:38661:38661)] 2024-11-07T12:27:48,201 DEBUG [87fb68174c52:34159 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-07T12:27:48,213 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=87fb68174c52,43727,1730982466783 2024-11-07T12:27:48,220 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87fb68174c52,43727,1730982466783, state=OPENING 2024-11-07T12:27:48,225 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-07T12:27:48,227 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43727-0x1001a36599d0001, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:27:48,227 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:27:48,228 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:27:48,228 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:27:48,229 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:27:48,231 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=87fb68174c52,43727,1730982466783}] 2024-11-07T12:27:48,406 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-07T12:27:48,410 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57939, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-07T12:27:48,420 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-07T12:27:48,421 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:27:48,424 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87fb68174c52%2C43727%2C1730982466783.meta, suffix=.meta, logDir=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783, archiveDir=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/oldWALs, maxLogs=32 2024-11-07T12:27:48,426 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C43727%2C1730982466783.meta.1730982468426.meta 2024-11-07T12:27:48,434 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.meta.1730982468426.meta 2024-11-07T12:27:48,436 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44959:44959),(127.0.0.1/127.0.0.1:38661:38661)] 2024-11-07T12:27:48,437 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:27:48,438 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-07T12:27:48,441 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-07T12:27:48,446 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-07T12:27:48,450 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-07T12:27:48,451 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:27:48,451 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-07T12:27:48,451 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-07T12:27:48,454 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:27:48,455 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:27:48,455 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:27:48,456 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:27:48,456 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:27:48,458 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:27:48,458 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:27:48,459 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:27:48,459 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:27:48,460 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:27:48,460 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:27:48,461 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:27:48,461 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:27:48,462 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:27:48,462 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:27:48,463 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-07T12:27:48,463 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:27:48,464 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/hbase/meta/1588230740 2024-11-07T12:27:48,467 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/hbase/meta/1588230740 2024-11-07T12:27:48,469 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:27:48,469 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:27:48,470 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T12:27:48,472 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:27:48,473 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=774158, jitterRate=-0.015607327222824097}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:27:48,473 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-07T12:27:48,475 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1730982468452Writing region info on filesystem at 1730982468452Initializing all the Stores at 1730982468453 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982468453Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982468454 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982468454Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982468454Cleaning up temporary data from old regions at 1730982468469 (+15 ms)Running coprocessor post-open hooks at 1730982468474 (+5 ms)Region opened successfully at 1730982468475 (+1 ms) 2024-11-07T12:27:48,482 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1730982468397 2024-11-07T12:27:48,495 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-07T12:27:48,495 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-07T12:27:48,497 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=87fb68174c52,43727,1730982466783 2024-11-07T12:27:48,499 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87fb68174c52,43727,1730982466783, state=OPEN 2024-11-07T12:27:48,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:27:48,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43727-0x1001a36599d0001, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:27:48,503 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:27:48,503 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:27:48,504 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=87fb68174c52,43727,1730982466783 2024-11-07T12:27:48,509 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-07T12:27:48,509 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=87fb68174c52,43727,1730982466783 in 273 msec 2024-11-07T12:27:48,517 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-07T12:27:48,517 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 726 msec 2024-11-07T12:27:48,518 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:27:48,518 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-07T12:27:48,539 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:27:48,540 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87fb68174c52,43727,1730982466783, seqNum=-1] 2024-11-07T12:27:48,561 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:27:48,563 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53683, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:27:48,583 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 963 msec 2024-11-07T12:27:48,584 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1730982468584, completionTime=-1 2024-11-07T12:27:48,586 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-07T12:27:48,586 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-07T12:27:48,614 INFO [master/87fb68174c52:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-07T12:27:48,614 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1730982528614 2024-11-07T12:27:48,614 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1730982588614 2024-11-07T12:27:48,614 INFO [master/87fb68174c52:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 27 msec 2024-11-07T12:27:48,617 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,34159,1730982466116-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:27:48,617 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,34159,1730982466116-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:27:48,617 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,34159,1730982466116-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:27:48,619 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-87fb68174c52:34159, period=300000, unit=MILLISECONDS is enabled. 
2024-11-07T12:27:48,619 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-07T12:27:48,620 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-07T12:27:48,626 DEBUG [master/87fb68174c52:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-07T12:27:48,646 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.725sec 2024-11-07T12:27:48,647 INFO [master/87fb68174c52:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-07T12:27:48,648 INFO [master/87fb68174c52:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-07T12:27:48,649 INFO [master/87fb68174c52:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-07T12:27:48,650 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-07T12:27:48,650 INFO [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-07T12:27:48,651 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,34159,1730982466116-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:27:48,651 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,34159,1730982466116-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-07T12:27:48,659 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-07T12:27:48,660 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-07T12:27:48,660 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,34159,1730982466116-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-07T12:27:48,682 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e87ad9c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:27:48,684 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-07T12:27:48,684 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-07T12:27:48,687 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 87fb68174c52,34159,-1 for getting cluster id 2024-11-07T12:27:48,690 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-07T12:27:48,699 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '54453467-f894-4cf6-9004-0469685a67cb' 2024-11-07T12:27:48,701 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-07T12:27:48,702 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "54453467-f894-4cf6-9004-0469685a67cb" 2024-11-07T12:27:48,704 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d4cdb45, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:27:48,704 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [87fb68174c52,34159,-1] 2024-11-07T12:27:48,707 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-07T12:27:48,708 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:27:48,710 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37236, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-07T12:27:48,713 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6dd7b3c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:27:48,714 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:27:48,723 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87fb68174c52,43727,1730982466783, seqNum=-1] 2024-11-07T12:27:48,723 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:27:48,726 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39928, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:27:48,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=87fb68174c52,34159,1730982466116 2024-11-07T12:27:48,747 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:27:48,755 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-07T12:27:48,759 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-07T12:27:48,764 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 87fb68174c52,34159,1730982466116 2024-11-07T12:27:48,766 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@549cc41e 2024-11-07T12:27:48,767 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T12:27:48,770 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37244, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T12:27:48,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34159 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-07T12:27:48,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34159 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-07T12:27:48,776 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34159 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T12:27:48,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34159 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-07T12:27:48,786 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T12:27:48,788 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34159 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-07T12:27:48,789 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:27:48,791 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T12:27:48,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-07T12:27:48,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741835_1011 (size=389) 2024-11-07T12:27:48,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741835_1011 (size=389) 2024-11-07T12:27:48,837 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => bc1a3d5d8ba8e4fa17158d22fbb472db, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865 2024-11-07T12:27:48,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741836_1012 (size=72) 2024-11-07T12:27:48,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741836_1012 (size=72) 2024-11-07T12:27:48,848 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:27:48,848 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing bc1a3d5d8ba8e4fa17158d22fbb472db, disabling compactions & flushes 2024-11-07T12:27:48,848 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db. 2024-11-07T12:27:48,848 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db. 2024-11-07T12:27:48,848 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db. after waiting 0 ms 2024-11-07T12:27:48,848 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db. 2024-11-07T12:27:48,848 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db. 2024-11-07T12:27:48,848 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for bc1a3d5d8ba8e4fa17158d22fbb472db: Waiting for close lock at 1730982468848Disabling compacts and flushes for region at 1730982468848Disabling writes for close at 1730982468848Writing region close event to WAL at 1730982468848Closed at 1730982468848 2024-11-07T12:27:48,851 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T12:27:48,856 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1730982468851"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730982468851"}]},"ts":"1730982468851"} 2024-11-07T12:27:48,861 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-07T12:27:48,863 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T12:27:48,866 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730982468863"}]},"ts":"1730982468863"} 2024-11-07T12:27:48,870 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-07T12:27:48,872 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=bc1a3d5d8ba8e4fa17158d22fbb472db, ASSIGN}] 2024-11-07T12:27:48,874 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=bc1a3d5d8ba8e4fa17158d22fbb472db, ASSIGN 2024-11-07T12:27:48,876 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=bc1a3d5d8ba8e4fa17158d22fbb472db, ASSIGN; state=OFFLINE, location=87fb68174c52,43727,1730982466783; forceNewPlan=false, retain=false 2024-11-07T12:27:49,027 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bc1a3d5d8ba8e4fa17158d22fbb472db, regionState=OPENING, regionLocation=87fb68174c52,43727,1730982466783 2024-11-07T12:27:49,032 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=bc1a3d5d8ba8e4fa17158d22fbb472db, ASSIGN because future has completed 2024-11-07T12:27:49,033 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bc1a3d5d8ba8e4fa17158d22fbb472db, server=87fb68174c52,43727,1730982466783}] 2024-11-07T12:27:49,196 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db. 
2024-11-07T12:27:49,196 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => bc1a3d5d8ba8e4fa17158d22fbb472db, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:27:49,197 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling bc1a3d5d8ba8e4fa17158d22fbb472db 2024-11-07T12:27:49,197 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:27:49,197 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for bc1a3d5d8ba8e4fa17158d22fbb472db 2024-11-07T12:27:49,197 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for bc1a3d5d8ba8e4fa17158d22fbb472db 2024-11-07T12:27:49,200 INFO [StoreOpener-bc1a3d5d8ba8e4fa17158d22fbb472db-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region bc1a3d5d8ba8e4fa17158d22fbb472db 2024-11-07T12:27:49,203 INFO [StoreOpener-bc1a3d5d8ba8e4fa17158d22fbb472db-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bc1a3d5d8ba8e4fa17158d22fbb472db columnFamilyName info 2024-11-07T12:27:49,203 DEBUG [StoreOpener-bc1a3d5d8ba8e4fa17158d22fbb472db-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:27:49,204 INFO [StoreOpener-bc1a3d5d8ba8e4fa17158d22fbb472db-1 {}] regionserver.HStore(327): Store=bc1a3d5d8ba8e4fa17158d22fbb472db/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:27:49,204 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for bc1a3d5d8ba8e4fa17158d22fbb472db 2024-11-07T12:27:49,206 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db 2024-11-07T12:27:49,206 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db 2024-11-07T12:27:49,207 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for bc1a3d5d8ba8e4fa17158d22fbb472db 2024-11-07T12:27:49,207 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for bc1a3d5d8ba8e4fa17158d22fbb472db 2024-11-07T12:27:49,210 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for bc1a3d5d8ba8e4fa17158d22fbb472db 2024-11-07T12:27:49,214 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:27:49,215 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened bc1a3d5d8ba8e4fa17158d22fbb472db; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=735405, jitterRate=-0.06488466262817383}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-07T12:27:49,215 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bc1a3d5d8ba8e4fa17158d22fbb472db 2024-11-07T12:27:49,216 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for bc1a3d5d8ba8e4fa17158d22fbb472db: Running coprocessor pre-open hook at 1730982469197Writing region info on filesystem at 1730982469198 (+1 ms)Initializing all the Stores at 1730982469199 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982469199Cleaning up temporary data from old regions at 1730982469207 (+8 ms)Running coprocessor post-open hooks at 1730982469215 (+8 ms)Region opened successfully at 1730982469216 (+1 ms) 2024-11-07T12:27:49,218 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db., pid=6, masterSystemTime=1730982469188 2024-11-07T12:27:49,223 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db. 2024-11-07T12:27:49,223 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db. 2024-11-07T12:27:49,224 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bc1a3d5d8ba8e4fa17158d22fbb472db, regionState=OPEN, openSeqNum=2, regionLocation=87fb68174c52,43727,1730982466783 2024-11-07T12:27:49,228 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bc1a3d5d8ba8e4fa17158d22fbb472db, server=87fb68174c52,43727,1730982466783 because future has completed 2024-11-07T12:27:49,236 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-07T12:27:49,237 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure bc1a3d5d8ba8e4fa17158d22fbb472db, server=87fb68174c52,43727,1730982466783 in 198 msec 2024-11-07T12:27:49,240 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-07T12:27:49,240 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=bc1a3d5d8ba8e4fa17158d22fbb472db, ASSIGN in 364 msec 2024-11-07T12:27:49,242 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T12:27:49,243 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730982469242"}]},"ts":"1730982469242"} 2024-11-07T12:27:49,246 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-07T12:27:49,248 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T12:27:49,251 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 470 msec 2024-11-07T12:27:53,953 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-07T12:27:53,996 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-07T12:27:53,997 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-07T12:27:56,518 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-07T12:27:56,518 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-07T12:27:56,520 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-07T12:27:56,520 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-07T12:27:56,521 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T12:27:56,521 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-07T12:27:56,521 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-07T12:27:56,521 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-07T12:27:58,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-07T12:27:58,879 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-07T12:27:58,882 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-07T12:27:58,888 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-07T12:27:58,889 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db. 
2024-11-07T12:27:58,890 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C43727%2C1730982466783.1730982478889 2024-11-07T12:27:58,899 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:27:58,899 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:27:58,899 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:27:58,900 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:27:58,900 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:27:58,900 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982468038 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982478889 2024-11-07T12:27:58,902 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38661:38661),(127.0.0.1/127.0.0.1:44959:44959)] 2024-11-07T12:27:58,902 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982468038 is not closed yet, will try archiving it next time 2024-11-07T12:27:58,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741833_1009 (size=451) 2024-11-07T12:27:58,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741833_1009 (size=451) 2024-11-07T12:27:58,908 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982468038 to hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/oldWALs/87fb68174c52%2C43727%2C1730982466783.1730982468038 2024-11-07T12:27:58,910 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db., hostname=87fb68174c52,43727,1730982466783, seqNum=2] 2024-11-07T12:28:10,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43727 {}] regionserver.HRegion(8855): Flush requested on bc1a3d5d8ba8e4fa17158d22fbb472db 2024-11-07T12:28:10,944 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bc1a3d5d8ba8e4fa17158d22fbb472db 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-07T12:28:11,006 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/.tmp/info/d3c04eb04b0b42f0adb23836f11180cf is 1080, key is row0001/info:/1730982478913/Put/seqid=0 2024-11-07T12:28:11,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741838_1014 (size=12509) 2024-11-07T12:28:11,019 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741838_1014 (size=12509) 2024-11-07T12:28:11,019 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/.tmp/info/d3c04eb04b0b42f0adb23836f11180cf 2024-11-07T12:28:11,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/.tmp/info/d3c04eb04b0b42f0adb23836f11180cf as hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/d3c04eb04b0b42f0adb23836f11180cf 2024-11-07T12:28:11,081 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/d3c04eb04b0b42f0adb23836f11180cf, entries=7, sequenceid=11, filesize=12.2 K 2024-11-07T12:28:11,090 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for bc1a3d5d8ba8e4fa17158d22fbb472db in 145ms, sequenceid=11, compaction requested=false 2024-11-07T12:28:11,091 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bc1a3d5d8ba8e4fa17158d22fbb472db: 2024-11-07T12:28:15,219 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
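The sequence just above — DefaultStoreFlusher writing the new HFile under the region's .tmp directory, HRegionFileSystem committing it into the info family directory, and HStore reporting it added — is the standard memstore flush cycle. In this run the flushes are triggered by memstore size, but the same cycle can be driven by hand through the Admin API; the sketch below assumes a reachable cluster and reuses only the table name from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ManualFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the region servers to flush the memstore of every region of the table.
      // Each flush writes a new HFile under .../<region>/.tmp/ and then commits it
      // into the column-family directory, as the log lines above show.
      admin.flush(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"));
    }
  }
}
```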
2024-11-07T12:28:18,955 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C43727%2C1730982466783.1730982498954 2024-11-07T12:28:19,163 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK], DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK]] 2024-11-07T12:28:19,163 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:28:19,163 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:28:19,164 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:28:19,164 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:28:19,164 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:28:19,164 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982478889 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982498954 2024-11-07T12:28:19,165 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38661:38661),(127.0.0.1/127.0.0.1:44959:44959)] 2024-11-07T12:28:19,165 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982478889 is not closed yet, will try archiving it next time 2024-11-07T12:28:19,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741837_1013 (size=12399) 2024-11-07T12:28:19,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741837_1013 (size=12399) 2024-11-07T12:28:19,369 INFO [FSHLog-0-hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865-prefix:87fb68174c52,43727,1730982466783 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK], DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK]] 2024-11-07T12:28:21,574 INFO [FSHLog-0-hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865-prefix:87fb68174c52,43727,1730982466783 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK], DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK]] 2024-11-07T12:28:23,778 INFO [FSHLog-0-hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865-prefix:87fb68174c52,43727,1730982466783 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK], DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK]] 2024-11-07T12:28:25,983 INFO [FSHLog-0-hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865-prefix:87fb68174c52,43727,1730982466783 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK], DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK]] 2024-11-07T12:28:25,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43727 {}] regionserver.HRegion(8855): Flush requested on bc1a3d5d8ba8e4fa17158d22fbb472db 2024-11-07T12:28:25,983 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bc1a3d5d8ba8e4fa17158d22fbb472db 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-07T12:28:26,186 INFO [FSHLog-0-hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865-prefix:87fb68174c52,43727,1730982466783 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK], DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK]] 2024-11-07T12:28:26,192 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/.tmp/info/6d4631dd091349edbe538b25fa4d01e8 is 1080, key is row0008/info:/1730982492943/Put/seqid=0 2024-11-07T12:28:26,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741840_1016 (size=12509) 2024-11-07T12:28:26,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741840_1016 (size=12509) 2024-11-07T12:28:26,205 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/.tmp/info/6d4631dd091349edbe538b25fa4d01e8 2024-11-07T12:28:26,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/.tmp/info/6d4631dd091349edbe538b25fa4d01e8 as hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/6d4631dd091349edbe538b25fa4d01e8 2024-11-07T12:28:26,225 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/6d4631dd091349edbe538b25fa4d01e8, entries=7, sequenceid=21, filesize=12.2 K 2024-11-07T12:28:26,428 INFO [FSHLog-0-hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865-prefix:87fb68174c52,43727,1730982466783 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK], DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK]] 2024-11-07T12:28:26,428 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for bc1a3d5d8ba8e4fa17158d22fbb472db in 
445ms, sequenceid=21, compaction requested=false 2024-11-07T12:28:26,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bc1a3d5d8ba8e4fa17158d22fbb472db: 2024-11-07T12:28:26,429 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-07T12:28:26,429 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:28:26,430 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/d3c04eb04b0b42f0adb23836f11180cf because midkey is the same as first or last row 2024-11-07T12:28:28,188 INFO [FSHLog-0-hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865-prefix:87fb68174c52,43727,1730982466783 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK], DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK]] 2024-11-07T12:28:28,668 INFO [master/87fb68174c52:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-07T12:28:28,668 INFO [master/87fb68174c52:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-07T12:28:30,392 INFO [FSHLog-0-hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865-prefix:87fb68174c52,43727,1730982466783 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK], DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK]] 2024-11-07T12:28:30,394 WARN [FSHLog-0-hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865-prefix:87fb68174c52,43727,1730982466783 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK], DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK]] 2024-11-07T12:28:30,396 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87fb68174c52%2C43727%2C1730982466783:(num 1730982498954) roll requested 2024-11-07T12:28:30,396 INFO [regionserver/87fb68174c52:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C43727%2C1730982466783.1730982510396 2024-11-07T12:28:30,609 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK], DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK]] 2024-11-07T12:28:30,609 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:28:30,609 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:28:30,609 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:28:30,610 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:28:30,610 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
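The WARN just above ("count=8, threshold=5") is the count-based slow-sync trigger this test exercises; the time-based variant ("time=5006 ms, threshold=5000 ms") appears further down once individual syncs start taking five seconds. A stripped-down sketch of that bookkeeping follows. The field names and the 100 ms "slow" cutoff are illustrative guesses rather than HBase configuration keys; only the two trigger conditions mirror the log.

```java
// Simplified sketch of the slow-sync bookkeeping behind the roll requests in this log.
// Not HBase code: only the two triggers (too many slow syncs, or one very slow sync)
// are taken from the messages in this run; thresholds and names are illustrative.
final class SlowSyncTrackerSketch {
  private static final long SLOW_SYNC_MS = 100;            // a sync slower than this counts as "slow"
  private static final long ROLL_ON_SINGLE_SYNC_MS = 5_000; // one sync this slow forces a roll
  private static final int ROLL_ON_SLOW_COUNT = 5;          // or more than this many slow syncs

  private int slowCount;

  /** Returns true if the WAL should be rolled after observing one sync. */
  boolean recordSync(long syncDurationMs) {
    if (syncDurationMs >= ROLL_ON_SINGLE_SYNC_MS) {
      return true;                        // the "time=5006 ms, threshold=5000 ms" case below
    }
    if (syncDurationMs >= SLOW_SYNC_MS && ++slowCount > ROLL_ON_SLOW_COUNT) {
      slowCount = 0;                      // reset once a roll has been requested
      return true;                        // the "count=8, threshold=5" case above
    }
    return false;
  }
}
```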
2024-11-07T12:28:30,610 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982498954 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982510396 2024-11-07T12:28:30,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741839_1015 (size=7739) 2024-11-07T12:28:30,613 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44959:44959),(127.0.0.1/127.0.0.1:38661:38661)] 2024-11-07T12:28:30,613 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982498954 is not closed yet, will try archiving it next time 2024-11-07T12:28:30,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741839_1015 (size=7739) 2024-11-07T12:28:30,613 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982478889 to hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/oldWALs/87fb68174c52%2C43727%2C1730982466783.1730982478889 2024-11-07T12:28:32,598 INFO [FSHLog-0-hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865-prefix:87fb68174c52,43727,1730982466783 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK], DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK]] 2024-11-07T12:28:34,197 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region bc1a3d5d8ba8e4fa17158d22fbb472db, had cached 0 bytes from a total of 25018 2024-11-07T12:28:34,802 INFO [FSHLog-0-hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865-prefix:87fb68174c52,43727,1730982466783 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK], DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK]] 2024-11-07T12:28:37,007 INFO [FSHLog-0-hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865-prefix:87fb68174c52,43727,1730982466783 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK], DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK]] 2024-11-07T12:28:39,212 INFO [FSHLog-0-hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865-prefix:87fb68174c52,43727,1730982466783 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK], 
DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK]] 2024-11-07T12:28:41,214 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T12:28:41,214 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C43727%2C1730982466783.1730982521214 2024-11-07T12:28:45,220 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-07T12:28:46,224 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK], DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK]] 2024-11-07T12:28:46,226 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK], DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK]] 2024-11-07T12:28:46,226 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87fb68174c52%2C43727%2C1730982466783:(num 1730982521214) roll requested 2024-11-07T12:28:46,226 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:28:46,227 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:28:46,227 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:28:46,227 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:28:46,227 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:28:46,227 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982510396 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982521214 2024-11-07T12:28:46,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741841_1017 (size=4753) 2024-11-07T12:28:46,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741841_1017 (size=4753) 2024-11-07T12:28:46,233 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38661:38661),(127.0.0.1/127.0.0.1:44959:44959)] 2024-11-07T12:28:46,233 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982510396 is not closed yet, will try archiving it next time 2024-11-07T12:28:46,233 INFO [regionserver/87fb68174c52:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C43727%2C1730982466783.1730982526233 2024-11-07T12:28:51,237 INFO [FSHLog-0-hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865-prefix:87fb68174c52,43727,1730982466783 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK], DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK]] 2024-11-07T12:28:51,237 WARN [FSHLog-0-hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865-prefix:87fb68174c52,43727,1730982466783 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK], DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK]] 2024-11-07T12:28:51,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43727 {}] regionserver.HRegion(8855): Flush requested on bc1a3d5d8ba8e4fa17158d22fbb472db 2024-11-07T12:28:51,237 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bc1a3d5d8ba8e4fa17158d22fbb472db 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-07T12:28:51,244 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK], DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK]] 2024-11-07T12:28:51,245 WARN [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK], DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK]] 2024-11-07T12:28:53,238 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T12:28:56,239 INFO [FSHLog-0-hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865-prefix:87fb68174c52,43727,1730982466783 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK], DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK]] 2024-11-07T12:28:56,240 WARN [FSHLog-0-hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865-prefix:87fb68174c52,43727,1730982466783 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK], DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK]] 2024-11-07T12:28:56,240 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:28:56,240 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:28:56,240 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:28:56,240 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:28:56,240 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:28:56,241 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982521214 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982526233 2024-11-07T12:28:56,242 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38661:38661),(127.0.0.1/127.0.0.1:44959:44959)] 2024-11-07T12:28:56,242 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982521214 is not closed yet, will try archiving it next time 2024-11-07T12:28:56,242 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87fb68174c52%2C43727%2C1730982466783:(num 1730982526233) roll requested 2024-11-07T12:28:56,242 INFO [regionserver/87fb68174c52:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C43727%2C1730982466783.1730982536242 2024-11-07T12:28:56,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741842_1018 (size=1569) 2024-11-07T12:28:56,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741842_1018 (size=1569) 2024-11-07T12:28:56,246 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/.tmp/info/fc4a669e6e094bed9fb35e79a84deed8 is 1080, key is row0015/info:/1730982507986/Put/seqid=0 2024-11-07T12:28:56,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741844_1020 (size=12509) 2024-11-07T12:28:56,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741844_1020 (size=12509) 2024-11-07T12:28:56,254 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/.tmp/info/fc4a669e6e094bed9fb35e79a84deed8 2024-11-07T12:28:56,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/.tmp/info/fc4a669e6e094bed9fb35e79a84deed8 as hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/fc4a669e6e094bed9fb35e79a84deed8 2024-11-07T12:28:56,273 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/fc4a669e6e094bed9fb35e79a84deed8, entries=7, sequenceid=31, filesize=12.2 K 2024-11-07T12:29:01,250 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK], DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK]] 2024-11-07T12:29:01,250 WARN [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK], DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK]] 2024-11-07T12:29:01,275 INFO [FSHLog-0-hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865-prefix:87fb68174c52,43727,1730982466783 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK], DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK]] 2024-11-07T12:29:01,275 WARN [FSHLog-0-hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865-prefix:87fb68174c52,43727,1730982466783 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42917,DS-fc8262cb-70bf-4db2-b9ac-e40a9d4304e7,DISK], DatanodeInfoWithStorage[127.0.0.1:37581,DS-a6283247-7f49-4600-89dc-44c0cfcd0a12,DISK]] 2024-11-07T12:29:01,275 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for bc1a3d5d8ba8e4fa17158d22fbb472db in 10038ms, sequenceid=31, compaction requested=true 2024-11-07T12:29:01,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bc1a3d5d8ba8e4fa17158d22fbb472db: 2024-11-07T12:29:01,275 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:01,275 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:01,275 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-07T12:29:01,275 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:01,275 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:29:01,275 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:01,275 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/d3c04eb04b0b42f0adb23836f11180cf because midkey is the same as first or last row 2024-11-07T12:29:01,276 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:01,276 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982526233 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982536242 2024-11-07T12:29:01,277 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:38661:38661),(127.0.0.1/127.0.0.1:44959:44959)] 2024-11-07T12:29:01,277 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982526233 is not closed yet, will try archiving it next time 2024-11-07T12:29:01,277 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982498954 to hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/oldWALs/87fb68174c52%2C43727%2C1730982466783.1730982498954 2024-11-07T12:29:01,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bc1a3d5d8ba8e4fa17158d22fbb472db:info, priority=-2147483648, current under compaction store size is 1 2024-11-07T12:29:01,277 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87fb68174c52%2C43727%2C1730982466783:(num 1730982541277) roll requested 2024-11-07T12:29:01,278 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C43727%2C1730982466783.1730982541277 2024-11-07T12:29:01,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741843_1019 (size=438) 2024-11-07T12:29:01,280 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:29:01,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741843_1019 (size=438) 2024-11-07T12:29:01,280 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982510396 to hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/oldWALs/87fb68174c52%2C43727%2C1730982466783.1730982510396 2024-11-07T12:29:01,280 DEBUG [RS:0;87fb68174c52:43727-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T12:29:01,282 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982521214 to hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/oldWALs/87fb68174c52%2C43727%2C1730982466783.1730982521214 2024-11-07T12:29:01,284 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982526233 to hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/oldWALs/87fb68174c52%2C43727%2C1730982466783.1730982526233 2024-11-07T12:29:01,284 DEBUG [RS:0;87fb68174c52:43727-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations 
with 1 in ratio 2024-11-07T12:29:01,286 DEBUG [RS:0;87fb68174c52:43727-shortCompactions-0 {}] regionserver.HStore(1541): bc1a3d5d8ba8e4fa17158d22fbb472db/info is initiating minor compaction (all files) 2024-11-07T12:29:01,286 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:01,286 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:01,286 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:01,286 INFO [RS:0;87fb68174c52:43727-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bc1a3d5d8ba8e4fa17158d22fbb472db/info in TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db. 2024-11-07T12:29:01,286 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:01,287 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:01,287 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982536242 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982541277 2024-11-07T12:29:01,287 INFO [RS:0;87fb68174c52:43727-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/d3c04eb04b0b42f0adb23836f11180cf, hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/6d4631dd091349edbe538b25fa4d01e8, hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/fc4a669e6e094bed9fb35e79a84deed8] into tmpdir=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/.tmp, totalSize=36.6 K 2024-11-07T12:29:01,288 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44959:44959),(127.0.0.1/127.0.0.1:38661:38661)] 2024-11-07T12:29:01,288 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982536242 is not closed yet, will try archiving it next time 2024-11-07T12:29:01,288 INFO [regionserver/87fb68174c52:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C43727%2C1730982466783.1730982541288 2024-11-07T12:29:01,288 DEBUG [RS:0;87fb68174c52:43727-shortCompactions-0 {}] compactions.Compactor(225): Compacting d3c04eb04b0b42f0adb23836f11180cf, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1730982478913 2024-11-07T12:29:01,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741845_1021 (size=93) 2024-11-07T12:29:01,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741845_1021 (size=93) 2024-11-07T12:29:01,293 DEBUG 
[RS:0;87fb68174c52:43727-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6d4631dd091349edbe538b25fa4d01e8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1730982492943 2024-11-07T12:29:01,293 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982536242 to hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/oldWALs/87fb68174c52%2C43727%2C1730982466783.1730982536242 2024-11-07T12:29:01,294 DEBUG [RS:0;87fb68174c52:43727-shortCompactions-0 {}] compactions.Compactor(225): Compacting fc4a669e6e094bed9fb35e79a84deed8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1730982507986 2024-11-07T12:29:01,306 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:01,306 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:01,306 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:01,306 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:01,306 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:01,307 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982541277 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/WALs/87fb68174c52,43727,1730982466783/87fb68174c52%2C43727%2C1730982466783.1730982541288 2024-11-07T12:29:01,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741846_1022 (size=1258) 2024-11-07T12:29:01,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741846_1022 (size=1258) 2024-11-07T12:29:01,311 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38661:38661),(127.0.0.1/127.0.0.1:44959:44959)] 2024-11-07T12:29:01,324 INFO [RS:0;87fb68174c52:43727-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bc1a3d5d8ba8e4fa17158d22fbb472db#info#compaction#3 average throughput is 7.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:29:01,326 DEBUG [RS:0;87fb68174c52:43727-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/.tmp/info/041aa8ae306d4b9d9c3d2c49f998b430 is 1080, key is row0001/info:/1730982478913/Put/seqid=0 2024-11-07T12:29:01,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741848_1024 (size=27710) 2024-11-07T12:29:01,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741848_1024 (size=27710) 2024-11-07T12:29:01,351 DEBUG [RS:0;87fb68174c52:43727-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/.tmp/info/041aa8ae306d4b9d9c3d2c49f998b430 as hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/041aa8ae306d4b9d9c3d2c49f998b430 2024-11-07T12:29:01,369 INFO [RS:0;87fb68174c52:43727-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bc1a3d5d8ba8e4fa17158d22fbb472db/info of bc1a3d5d8ba8e4fa17158d22fbb472db into 041aa8ae306d4b9d9c3d2c49f998b430(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T12:29:01,369 DEBUG [RS:0;87fb68174c52:43727-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bc1a3d5d8ba8e4fa17158d22fbb472db: 2024-11-07T12:29:01,371 INFO [RS:0;87fb68174c52:43727-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db., storeName=bc1a3d5d8ba8e4fa17158d22fbb472db/info, priority=13, startTime=1730982541277; duration=0sec 2024-11-07T12:29:01,371 DEBUG [RS:0;87fb68174c52:43727-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-07T12:29:01,371 DEBUG [RS:0;87fb68174c52:43727-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:29:01,371 DEBUG [RS:0;87fb68174c52:43727-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/041aa8ae306d4b9d9c3d2c49f998b430 because midkey is the same as first or last row 2024-11-07T12:29:01,372 DEBUG [RS:0;87fb68174c52:43727-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-07T12:29:01,372 DEBUG [RS:0;87fb68174c52:43727-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:29:01,372 DEBUG [RS:0;87fb68174c52:43727-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/041aa8ae306d4b9d9c3d2c49f998b430 because midkey is the same as first or last row 2024-11-07T12:29:01,372 DEBUG [RS:0;87fb68174c52:43727-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-07T12:29:01,372 DEBUG [RS:0;87fb68174c52:43727-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:29:01,372 DEBUG [RS:0;87fb68174c52:43727-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/041aa8ae306d4b9d9c3d2c49f998b430 because midkey is the same as first or last row 2024-11-07T12:29:01,372 DEBUG [RS:0;87fb68174c52:43727-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:29:01,372 DEBUG [RS:0;87fb68174c52:43727-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bc1a3d5d8ba8e4fa17158d22fbb472db:info 2024-11-07T12:29:13,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43727 {}] regionserver.HRegion(8855): Flush requested on bc1a3d5d8ba8e4fa17158d22fbb472db 2024-11-07T12:29:13,312 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bc1a3d5d8ba8e4fa17158d22fbb472db 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-07T12:29:13,319 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/.tmp/info/e5503ab36f784ec88c17f2df50d15733 is 1080, key is row0022/info:/1730982541290/Put/seqid=0 2024-11-07T12:29:13,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741849_1025 (size=12509) 2024-11-07T12:29:13,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741849_1025 (size=12509) 2024-11-07T12:29:13,326 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/.tmp/info/e5503ab36f784ec88c17f2df50d15733 2024-11-07T12:29:13,335 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/.tmp/info/e5503ab36f784ec88c17f2df50d15733 as hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/e5503ab36f784ec88c17f2df50d15733 2024-11-07T12:29:13,344 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/e5503ab36f784ec88c17f2df50d15733, entries=7, sequenceid=42, filesize=12.2 K 2024-11-07T12:29:13,345 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for bc1a3d5d8ba8e4fa17158d22fbb472db in 33ms, sequenceid=42, compaction requested=false 2024-11-07T12:29:13,345 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bc1a3d5d8ba8e4fa17158d22fbb472db: 2024-11-07T12:29:13,346 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-07T12:29:13,346 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:29:13,346 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/041aa8ae306d4b9d9c3d2c49f998b430 because midkey is the same as first or last row 2024-11-07T12:29:15,220 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-07T12:29:19,197 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region bc1a3d5d8ba8e4fa17158d22fbb472db, had cached 0 bytes from a total of 40219 2024-11-07T12:29:21,324 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-07T12:29:21,324 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
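Every flush and compaction in this run ends with the same pair of split-policy checks seen above: the size test passes (sumSize is well over the 16.0 K sizeToCheck configured for the test), but StoreUtils refuses to split because the largest store file's midkey equals the first or last row. A hedged sketch of that decision, with invented parameter names, is shown here before the teardown records continue.

```java
import java.util.Arrays;

// Sketch of the post-flush split decision ("Should split because region size is big
// enough ..." followed by "cannot split ... because midkey is the same as first or
// last row"). Parameter names are invented for illustration; this is not HBase's API.
final class SplitCheckSketch {
  static boolean shouldSplit(long storeSizeBytes, long sizeToCheckBytes,
                             byte[] midKey, byte[] firstKey, byte[] lastKey) {
    if (storeSizeBytes <= sizeToCheckBytes) {
      return false;                            // region not big enough yet
    }
    // The split point would be the largest store file's midkey. If it equals the
    // first or last row, one daughter region would be empty, so the split is
    // refused even though the size threshold was met -- the refusal logged above.
    return !Arrays.equals(midKey, firstKey) && !Arrays.equals(midKey, lastKey);
  }
}
```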
2024-11-07T12:29:21,324 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:29:21,330 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:29:21,330 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:29:21,330 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
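The call stack above is the ordinary JUnit teardown path: AbstractTestLogRolling.tearDown calls HBaseTestingUtil.shutdownMiniCluster, which first closes the shared async connection and then stops the cluster. The skeleton below shows the usual shape of such a test; HBaseTestingUtil and shutdownMiniCluster appear verbatim in the stack, while startMiniCluster and the JUnit wiring are assumed from common usage.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

// Minimal lifecycle matching the tear-down stack trace above. Everything inside the
// test body is illustrative; only the utility class and shutdownMiniCluster() are
// taken from the log.
public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    util.startMiniCluster();        // brings up ZooKeeper, HDFS, a master and a region server
  }

  @Test
  public void exerciseCluster() throws Exception {
    // ... write rows, force WAL rolls, assert on the resulting files ...
  }

  @After
  public void tearDown() throws Exception {
    util.shutdownMiniCluster();     // closes connections, stops RS/master, then HDFS/ZK
  }
}
```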
2024-11-07T12:29:21,330 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-07T12:29:21,330 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1904156029, stopped=false 2024-11-07T12:29:21,331 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=87fb68174c52,34159,1730982466116 2024-11-07T12:29:21,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43727-0x1001a36599d0001, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:29:21,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:29:21,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43727-0x1001a36599d0001, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:21,333 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:29:21,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:21,333 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-07T12:29:21,334 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:29:21,334 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:29:21,334 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43727-0x1001a36599d0001, quorum=127.0.0.1:49916, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:29:21,334 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:29:21,334 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '87fb68174c52,43727,1730982466783' ***** 2024-11-07T12:29:21,334 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-07T12:29:21,334 INFO [RS:0;87fb68174c52:43727 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-07T12:29:21,335 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-07T12:29:21,335 INFO [RS:0;87fb68174c52:43727 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-07T12:29:21,335 INFO [RS:0;87fb68174c52:43727 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-07T12:29:21,335 INFO [RS:0;87fb68174c52:43727 {}] regionserver.HRegionServer(3091): Received CLOSE for bc1a3d5d8ba8e4fa17158d22fbb472db 2024-11-07T12:29:21,336 INFO [RS:0;87fb68174c52:43727 {}] regionserver.HRegionServer(959): stopping server 87fb68174c52,43727,1730982466783 2024-11-07T12:29:21,336 INFO [RS:0;87fb68174c52:43727 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:29:21,336 INFO [RS:0;87fb68174c52:43727 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;87fb68174c52:43727. 
2024-11-07T12:29:21,336 DEBUG [RS:0;87fb68174c52:43727 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:29:21,336 DEBUG [RS:0;87fb68174c52:43727 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:29:21,336 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing bc1a3d5d8ba8e4fa17158d22fbb472db, disabling compactions & flushes 2024-11-07T12:29:21,336 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db. 2024-11-07T12:29:21,336 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db. 2024-11-07T12:29:21,336 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db. after waiting 0 ms 2024-11-07T12:29:21,337 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db. 2024-11-07T12:29:21,337 INFO [RS:0;87fb68174c52:43727 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-07T12:29:21,337 INFO [RS:0;87fb68174c52:43727 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-07T12:29:21,337 INFO [RS:0;87fb68174c52:43727 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-07T12:29:21,337 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing bc1a3d5d8ba8e4fa17158d22fbb472db 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-07T12:29:21,337 INFO [RS:0;87fb68174c52:43727 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-07T12:29:21,337 INFO [RS:0;87fb68174c52:43727 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-07T12:29:21,337 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:29:21,337 DEBUG [RS:0;87fb68174c52:43727 {}] regionserver.HRegionServer(1325): Online Regions={bc1a3d5d8ba8e4fa17158d22fbb472db=TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db., 1588230740=hbase:meta,,1.1588230740} 2024-11-07T12:29:21,337 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:29:21,337 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:29:21,337 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:29:21,338 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:29:21,338 DEBUG [RS:0;87fb68174c52:43727 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, bc1a3d5d8ba8e4fa17158d22fbb472db 2024-11-07T12:29:21,338 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-07T12:29:21,343 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/.tmp/info/5d14196a91084b7e8a55d1a262975c8b is 1080, key is row0029/info:/1730982555314/Put/seqid=0 2024-11-07T12:29:21,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741850_1026 (size=8193) 2024-11-07T12:29:21,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741850_1026 (size=8193) 2024-11-07T12:29:21,352 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/.tmp/info/5d14196a91084b7e8a55d1a262975c8b 2024-11-07T12:29:21,358 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/hbase/meta/1588230740/.tmp/info/4f1e51bb865642f981d98c745bb0f0cb is 195, key is TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db./info:regioninfo/1730982469224/Put/seqid=0 2024-11-07T12:29:21,362 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/.tmp/info/5d14196a91084b7e8a55d1a262975c8b as hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/5d14196a91084b7e8a55d1a262975c8b 2024-11-07T12:29:21,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741851_1027 (size=7016) 2024-11-07T12:29:21,367 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/hbase/meta/1588230740/.tmp/info/4f1e51bb865642f981d98c745bb0f0cb 2024-11-07T12:29:21,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741851_1027 (size=7016) 2024-11-07T12:29:21,369 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/5d14196a91084b7e8a55d1a262975c8b, entries=3, sequenceid=48, filesize=8.0 K 2024-11-07T12:29:21,371 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for bc1a3d5d8ba8e4fa17158d22fbb472db in 34ms, sequenceid=48, compaction requested=true 2024-11-07T12:29:21,371 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/d3c04eb04b0b42f0adb23836f11180cf, hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/6d4631dd091349edbe538b25fa4d01e8, hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/fc4a669e6e094bed9fb35e79a84deed8] to archive 2024-11-07T12:29:21,374 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-07T12:29:21,377 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/d3c04eb04b0b42f0adb23836f11180cf to hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/archive/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/d3c04eb04b0b42f0adb23836f11180cf 2024-11-07T12:29:21,379 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/6d4631dd091349edbe538b25fa4d01e8 to hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/archive/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/6d4631dd091349edbe538b25fa4d01e8 2024-11-07T12:29:21,381 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/fc4a669e6e094bed9fb35e79a84deed8 to hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/archive/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/info/fc4a669e6e094bed9fb35e79a84deed8 2024-11-07T12:29:21,392 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/hbase/meta/1588230740/.tmp/ns/14a2d8f6af1a4ec4bf3c63ddbda2c10f is 43, key is default/ns:d/1730982468568/Put/seqid=0 2024-11-07T12:29:21,393 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=87fb68174c52:34159 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-07T12:29:21,397 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [d3c04eb04b0b42f0adb23836f11180cf=12509, 6d4631dd091349edbe538b25fa4d01e8=12509, fc4a669e6e094bed9fb35e79a84deed8=12509] 2024-11-07T12:29:21,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741852_1028 (size=5153) 2024-11-07T12:29:21,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741852_1028 (size=5153) 2024-11-07T12:29:21,399 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/hbase/meta/1588230740/.tmp/ns/14a2d8f6af1a4ec4bf3c63ddbda2c10f 2024-11-07T12:29:21,403 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/default/TestLogRolling-testSlowSyncLogRolling/bc1a3d5d8ba8e4fa17158d22fbb472db/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-07T12:29:21,405 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db. 2024-11-07T12:29:21,405 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for bc1a3d5d8ba8e4fa17158d22fbb472db: Waiting for close lock at 1730982561336Running coprocessor pre-close hooks at 1730982561336Disabling compacts and flushes for region at 1730982561336Disabling writes for close at 1730982561336Obtaining lock to block concurrent updates at 1730982561337 (+1 ms)Preparing flush snapshotting stores in bc1a3d5d8ba8e4fa17158d22fbb472db at 1730982561337Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1730982561337Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db. at 1730982561338 (+1 ms)Flushing bc1a3d5d8ba8e4fa17158d22fbb472db/info: creating writer at 1730982561338Flushing bc1a3d5d8ba8e4fa17158d22fbb472db/info: appending metadata at 1730982561342 (+4 ms)Flushing bc1a3d5d8ba8e4fa17158d22fbb472db/info: closing flushed file at 1730982561343 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3a7a7ee4: reopening flushed file at 1730982561361 (+18 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for bc1a3d5d8ba8e4fa17158d22fbb472db in 34ms, sequenceid=48, compaction requested=true at 1730982561371 (+10 ms)Writing region close event to WAL at 1730982561398 (+27 ms)Running coprocessor post-close hooks at 1730982561404 (+6 ms)Closed at 1730982561405 (+1 ms) 2024-11-07T12:29:21,406 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1730982468771.bc1a3d5d8ba8e4fa17158d22fbb472db. 
2024-11-07T12:29:21,422 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/hbase/meta/1588230740/.tmp/table/1f2fd80dd8e54846a4430b7cfc959ee3 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1730982469242/Put/seqid=0 2024-11-07T12:29:21,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741853_1029 (size=5396) 2024-11-07T12:29:21,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741853_1029 (size=5396) 2024-11-07T12:29:21,428 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/hbase/meta/1588230740/.tmp/table/1f2fd80dd8e54846a4430b7cfc959ee3 2024-11-07T12:29:21,436 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/hbase/meta/1588230740/.tmp/info/4f1e51bb865642f981d98c745bb0f0cb as hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/hbase/meta/1588230740/info/4f1e51bb865642f981d98c745bb0f0cb 2024-11-07T12:29:21,442 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/hbase/meta/1588230740/info/4f1e51bb865642f981d98c745bb0f0cb, entries=10, sequenceid=11, filesize=6.9 K 2024-11-07T12:29:21,444 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/hbase/meta/1588230740/.tmp/ns/14a2d8f6af1a4ec4bf3c63ddbda2c10f as hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/hbase/meta/1588230740/ns/14a2d8f6af1a4ec4bf3c63ddbda2c10f 2024-11-07T12:29:21,450 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/hbase/meta/1588230740/ns/14a2d8f6af1a4ec4bf3c63ddbda2c10f, entries=2, sequenceid=11, filesize=5.0 K 2024-11-07T12:29:21,452 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/hbase/meta/1588230740/.tmp/table/1f2fd80dd8e54846a4430b7cfc959ee3 as hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/hbase/meta/1588230740/table/1f2fd80dd8e54846a4430b7cfc959ee3 2024-11-07T12:29:21,458 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/hbase/meta/1588230740/table/1f2fd80dd8e54846a4430b7cfc959ee3, entries=2, sequenceid=11, filesize=5.3 K 2024-11-07T12:29:21,460 INFO 
[RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 122ms, sequenceid=11, compaction requested=false 2024-11-07T12:29:21,465 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-07T12:29:21,466 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T12:29:21,466 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:29:21,466 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730982561337Running coprocessor pre-close hooks at 1730982561337Disabling compacts and flushes for region at 1730982561337Disabling writes for close at 1730982561338 (+1 ms)Obtaining lock to block concurrent updates at 1730982561338Preparing flush snapshotting stores in 1588230740 at 1730982561338Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1730982561338Flushing stores of hbase:meta,,1.1588230740 at 1730982561339 (+1 ms)Flushing 1588230740/info: creating writer at 1730982561339Flushing 1588230740/info: appending metadata at 1730982561358 (+19 ms)Flushing 1588230740/info: closing flushed file at 1730982561358Flushing 1588230740/ns: creating writer at 1730982561374 (+16 ms)Flushing 1588230740/ns: appending metadata at 1730982561392 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1730982561392Flushing 1588230740/table: creating writer at 1730982561407 (+15 ms)Flushing 1588230740/table: appending metadata at 1730982561421 (+14 ms)Flushing 1588230740/table: closing flushed file at 1730982561422 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23588bde: reopening flushed file at 1730982561435 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b84d881: reopening flushed file at 1730982561443 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@43ad1162: reopening flushed file at 1730982561451 (+8 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 122ms, sequenceid=11, compaction requested=false at 1730982561460 (+9 ms)Writing region close event to WAL at 1730982561461 (+1 ms)Running coprocessor post-close hooks at 1730982561466 (+5 ms)Closed at 1730982561466 2024-11-07T12:29:21,466 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-07T12:29:21,538 INFO [RS:0;87fb68174c52:43727 {}] regionserver.HRegionServer(976): stopping server 87fb68174c52,43727,1730982466783; all regions closed. 
2024-11-07T12:29:21,540 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:21,540 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:21,540 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:21,540 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:21,540 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:21,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741834_1010 (size=3066) 2024-11-07T12:29:21,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741834_1010 (size=3066) 2024-11-07T12:29:21,547 DEBUG [RS:0;87fb68174c52:43727 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/oldWALs 2024-11-07T12:29:21,547 INFO [RS:0;87fb68174c52:43727 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87fb68174c52%2C43727%2C1730982466783.meta:.meta(num 1730982468426) 2024-11-07T12:29:21,547 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:21,547 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:21,547 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:21,547 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:21,547 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:21,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741847_1023 (size=12695) 2024-11-07T12:29:21,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741847_1023 (size=12695) 2024-11-07T12:29:21,553 DEBUG [RS:0;87fb68174c52:43727 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/oldWALs 2024-11-07T12:29:21,553 INFO [RS:0;87fb68174c52:43727 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87fb68174c52%2C43727%2C1730982466783:(num 1730982541288) 2024-11-07T12:29:21,553 DEBUG [RS:0;87fb68174c52:43727 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:29:21,553 INFO [RS:0;87fb68174c52:43727 {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:29:21,554 INFO [RS:0;87fb68174c52:43727 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:29:21,554 INFO [RS:0;87fb68174c52:43727 {}] hbase.ChoreService(370): Chore service for: regionserver/87fb68174c52:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-07T12:29:21,554 INFO [RS:0;87fb68174c52:43727 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:29:21,554 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-07T12:29:21,554 INFO [RS:0;87fb68174c52:43727 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:43727 2024-11-07T12:29:21,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:29:21,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43727-0x1001a36599d0001, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/87fb68174c52,43727,1730982466783 2024-11-07T12:29:21,558 INFO [RS:0;87fb68174c52:43727 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:29:21,559 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [87fb68174c52,43727,1730982466783] 2024-11-07T12:29:21,561 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/87fb68174c52,43727,1730982466783 already deleted, retry=false 2024-11-07T12:29:21,561 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 87fb68174c52,43727,1730982466783 expired; onlineServers=0 2024-11-07T12:29:21,562 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '87fb68174c52,34159,1730982466116' ***** 2024-11-07T12:29:21,562 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-07T12:29:21,562 INFO [M:0;87fb68174c52:34159 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:29:21,562 INFO [M:0;87fb68174c52:34159 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:29:21,562 DEBUG [M:0;87fb68174c52:34159 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-07T12:29:21,562 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-07T12:29:21,562 DEBUG [M:0;87fb68174c52:34159 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-07T12:29:21,562 DEBUG [master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.large.0-1730982467719 {}] cleaner.HFileCleaner(306): Exit Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.large.0-1730982467719,5,FailOnTimeoutGroup] 2024-11-07T12:29:21,562 DEBUG [master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.small.0-1730982467721 {}] cleaner.HFileCleaner(306): Exit Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.small.0-1730982467721,5,FailOnTimeoutGroup] 2024-11-07T12:29:21,562 INFO [M:0;87fb68174c52:34159 {}] hbase.ChoreService(370): Chore service for: master/87fb68174c52:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-07T12:29:21,562 INFO [M:0;87fb68174c52:34159 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:29:21,563 DEBUG [M:0;87fb68174c52:34159 {}] master.HMaster(1795): Stopping service threads 2024-11-07T12:29:21,563 INFO [M:0;87fb68174c52:34159 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-07T12:29:21,563 INFO [M:0;87fb68174c52:34159 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:29:21,563 INFO [M:0;87fb68174c52:34159 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-07T12:29:21,563 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-07T12:29:21,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-07T12:29:21,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:21,565 DEBUG [M:0;87fb68174c52:34159 {}] zookeeper.ZKUtil(347): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-07T12:29:21,565 WARN [M:0;87fb68174c52:34159 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-07T12:29:21,565 INFO [M:0;87fb68174c52:34159 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/.lastflushedseqids 2024-11-07T12:29:21,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741854_1030 (size=130) 2024-11-07T12:29:21,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741854_1030 (size=130) 2024-11-07T12:29:21,577 INFO [M:0;87fb68174c52:34159 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-07T12:29:21,578 INFO [M:0;87fb68174c52:34159 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-07T12:29:21,578 DEBUG [M:0;87fb68174c52:34159 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:29:21,578 INFO [M:0;87fb68174c52:34159 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:29:21,578 DEBUG [M:0;87fb68174c52:34159 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:29:21,578 DEBUG [M:0;87fb68174c52:34159 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:29:21,578 DEBUG [M:0;87fb68174c52:34159 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:29:21,578 INFO [M:0;87fb68174c52:34159 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.04 KB heapSize=29.21 KB 2024-11-07T12:29:21,595 DEBUG [M:0;87fb68174c52:34159 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4144686db32a470295d3df71d8ec7111 is 82, key is hbase:meta,,1/info:regioninfo/1730982468496/Put/seqid=0 2024-11-07T12:29:21,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741855_1031 (size=5672) 2024-11-07T12:29:21,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741855_1031 (size=5672) 2024-11-07T12:29:21,602 INFO [M:0;87fb68174c52:34159 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4144686db32a470295d3df71d8ec7111 2024-11-07T12:29:21,623 DEBUG [M:0;87fb68174c52:34159 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d53d1790d9ad4cb3ab3095071f48ebd1 is 767, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1730982469250/Put/seqid=0 2024-11-07T12:29:21,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741856_1032 (size=6248) 2024-11-07T12:29:21,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741856_1032 (size=6248) 2024-11-07T12:29:21,630 INFO [M:0;87fb68174c52:34159 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.43 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d53d1790d9ad4cb3ab3095071f48ebd1 2024-11-07T12:29:21,635 INFO [M:0;87fb68174c52:34159 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d53d1790d9ad4cb3ab3095071f48ebd1 2024-11-07T12:29:21,657 DEBUG [M:0;87fb68174c52:34159 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3f5998a56ed44597b91075fe15dae98d is 69, key is 87fb68174c52,43727,1730982466783/rs:state/1730982467809/Put/seqid=0 2024-11-07T12:29:21,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43727-0x1001a36599d0001, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:29:21,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43727-0x1001a36599d0001, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:29:21,662 INFO [RS:0;87fb68174c52:43727 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:29:21,662 INFO [RS:0;87fb68174c52:43727 {}] regionserver.HRegionServer(1031): Exiting; stopping=87fb68174c52,43727,1730982466783; zookeeper connection closed. 2024-11-07T12:29:21,662 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@429b85d2 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@429b85d2 2024-11-07T12:29:21,663 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-07T12:29:21,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741857_1033 (size=5156) 2024-11-07T12:29:21,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741857_1033 (size=5156) 2024-11-07T12:29:21,664 INFO [M:0;87fb68174c52:34159 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3f5998a56ed44597b91075fe15dae98d 2024-11-07T12:29:21,686 DEBUG [M:0;87fb68174c52:34159 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/08706ab6a92243cf88f87a6b25615504 is 52, key is load_balancer_on/state:d/1730982468751/Put/seqid=0 2024-11-07T12:29:21,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741858_1034 (size=5056) 2024-11-07T12:29:21,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741858_1034 (size=5056) 2024-11-07T12:29:21,693 INFO [M:0;87fb68174c52:34159 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/08706ab6a92243cf88f87a6b25615504 2024-11-07T12:29:21,699 DEBUG [M:0;87fb68174c52:34159 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4144686db32a470295d3df71d8ec7111 as 
hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4144686db32a470295d3df71d8ec7111 2024-11-07T12:29:21,705 INFO [M:0;87fb68174c52:34159 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4144686db32a470295d3df71d8ec7111, entries=8, sequenceid=59, filesize=5.5 K 2024-11-07T12:29:21,706 DEBUG [M:0;87fb68174c52:34159 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d53d1790d9ad4cb3ab3095071f48ebd1 as hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d53d1790d9ad4cb3ab3095071f48ebd1 2024-11-07T12:29:21,712 INFO [M:0;87fb68174c52:34159 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d53d1790d9ad4cb3ab3095071f48ebd1 2024-11-07T12:29:21,712 INFO [M:0;87fb68174c52:34159 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d53d1790d9ad4cb3ab3095071f48ebd1, entries=6, sequenceid=59, filesize=6.1 K 2024-11-07T12:29:21,713 DEBUG [M:0;87fb68174c52:34159 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3f5998a56ed44597b91075fe15dae98d as hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3f5998a56ed44597b91075fe15dae98d 2024-11-07T12:29:21,719 INFO [M:0;87fb68174c52:34159 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3f5998a56ed44597b91075fe15dae98d, entries=1, sequenceid=59, filesize=5.0 K 2024-11-07T12:29:21,720 DEBUG [M:0;87fb68174c52:34159 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/08706ab6a92243cf88f87a6b25615504 as hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/08706ab6a92243cf88f87a6b25615504 2024-11-07T12:29:21,726 INFO [M:0;87fb68174c52:34159 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/08706ab6a92243cf88f87a6b25615504, entries=1, sequenceid=59, filesize=4.9 K 2024-11-07T12:29:21,727 INFO [M:0;87fb68174c52:34159 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=59, compaction requested=false 2024-11-07T12:29:21,729 INFO [M:0;87fb68174c52:34159 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-07T12:29:21,729 DEBUG [M:0;87fb68174c52:34159 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730982561578Disabling compacts and flushes for region at 1730982561578Disabling writes for close at 1730982561578Obtaining lock to block concurrent updates at 1730982561578Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1730982561578Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23588, getHeapSize=29848, getOffHeapSize=0, getCellsCount=70 at 1730982561579 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1730982561580 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1730982561580Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1730982561595 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1730982561595Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1730982561607 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1730982561623 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1730982561623Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1730982561635 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1730982561656 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1730982561656Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1730982561671 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1730982561686 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1730982561686Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6062c886: reopening flushed file at 1730982561698 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@ad58ec7: reopening flushed file at 1730982561705 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@396ea554: reopening flushed file at 1730982561712 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e4e5d21: reopening flushed file at 1730982561719 (+7 ms)Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=59, compaction requested=false at 1730982561727 (+8 ms)Writing region close event to WAL at 1730982561729 (+2 ms)Closed at 1730982561729 2024-11-07T12:29:21,730 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:21,730 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:21,730 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:21,730 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:21,730 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:21,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42917 is added to blk_1073741830_1006 (size=27985) 2024-11-07T12:29:21,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37581 is added to blk_1073741830_1006 (size=27985) 2024-11-07T12:29:21,733 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-07T12:29:21,734 INFO [M:0;87fb68174c52:34159 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-07T12:29:21,734 INFO [M:0;87fb68174c52:34159 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34159 2024-11-07T12:29:21,734 INFO [M:0;87fb68174c52:34159 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:29:21,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:29:21,836 INFO [M:0;87fb68174c52:34159 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:29:21,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34159-0x1001a36599d0000, quorum=127.0.0.1:49916, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:29:21,840 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:29:21,843 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:29:21,843 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:29:21,843 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:29:21,843 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/hadoop.log.dir/,STOPPED} 2024-11-07T12:29:21,846 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-07T12:29:21,846 WARN [BP-1757488485-172.17.0.3-1730982463262 heartbeating to localhost/127.0.0.1:35819 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:29:21,846 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:29:21,846 WARN [BP-1757488485-172.17.0.3-1730982463262 heartbeating to localhost/127.0.0.1:35819 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1757488485-172.17.0.3-1730982463262 (Datanode Uuid 82765e69-58f6-4989-82a4-6fc772a0fd26) service to localhost/127.0.0.1:35819 2024-11-07T12:29:21,847 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/cluster_13874df7-057f-48fa-b1e4-f00caab0ce89/data/data3/current/BP-1757488485-172.17.0.3-1730982463262 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:29:21,847 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/cluster_13874df7-057f-48fa-b1e4-f00caab0ce89/data/data4/current/BP-1757488485-172.17.0.3-1730982463262 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:29:21,848 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:29:21,852 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:29:21,852 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:29:21,852 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:29:21,852 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:29:21,853 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/hadoop.log.dir/,STOPPED} 2024-11-07T12:29:21,854 WARN [BP-1757488485-172.17.0.3-1730982463262 heartbeating to localhost/127.0.0.1:35819 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:29:21,854 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-07T12:29:21,854 WARN [BP-1757488485-172.17.0.3-1730982463262 heartbeating to localhost/127.0.0.1:35819 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1757488485-172.17.0.3-1730982463262 (Datanode Uuid 9a3704d1-8ad0-4329-9ca2-c2c3274aabf5) service to localhost/127.0.0.1:35819 2024-11-07T12:29:21,854 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:29:21,855 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/cluster_13874df7-057f-48fa-b1e4-f00caab0ce89/data/data1/current/BP-1757488485-172.17.0.3-1730982463262 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:29:21,855 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/cluster_13874df7-057f-48fa-b1e4-f00caab0ce89/data/data2/current/BP-1757488485-172.17.0.3-1730982463262 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:29:21,856 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:29:21,865 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:29:21,866 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:29:21,866 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:29:21,866 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:29:21,866 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/hadoop.log.dir/,STOPPED} 2024-11-07T12:29:21,876 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-07T12:29:21,887 INFO [regionserver/87fb68174c52:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:29:21,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-07T12:29:21,920 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35819 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/87fb68174c52:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/87fb68174c52:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@18e1792d java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35819 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35819 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35819 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35819 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35819 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:35819 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35819 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/87fb68174c52:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=106 (was 161), ProcessCount=11 (was 11), AvailableMemoryMB=7016 (was 7069) 2024-11-07T12:29:21,926 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=106, ProcessCount=11, AvailableMemoryMB=7016 2024-11-07T12:29:21,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-07T12:29:21,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/hadoop.log.dir so I do NOT create it in target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446 2024-11-07T12:29:21,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c896ff0a-9609-eb28-fbaa-44cb1fb32fab/hadoop.tmp.dir so I do NOT create it in target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446 2024-11-07T12:29:21,927 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/cluster_7dc445e3-5a33-1553-d109-58efa242a05e, deleteOnExit=true 2024-11-07T12:29:21,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-07T12:29:21,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/test.cache.data in system properties and HBase conf 2024-11-07T12:29:21,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/hadoop.tmp.dir in system properties and HBase conf 2024-11-07T12:29:21,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/hadoop.log.dir in system properties and HBase conf 2024-11-07T12:29:21,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-07T12:29:21,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-07T12:29:21,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-07T12:29:21,928 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-07T12:29:21,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:29:21,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:29:21,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-07T12:29:21,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:29:21,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-07T12:29:21,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-07T12:29:21,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:29:21,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:29:21,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-07T12:29:21,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/nfs.dump.dir in system properties and HBase conf 2024-11-07T12:29:21,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/java.io.tmpdir in system properties and HBase conf 2024-11-07T12:29:21,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:29:21,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-07T12:29:21,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-07T12:29:21,942 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:29:22,009 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:29:22,015 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:29:22,016 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:29:22,016 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:29:22,016 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:29:22,017 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:29:22,017 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59505eb5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:29:22,018 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f681677{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:29:22,133 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3b8ef2ff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/java.io.tmpdir/jetty-localhost-38299-hadoop-hdfs-3_4_1-tests_jar-_-any-3746326485977266861/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:29:22,133 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@10885b70{HTTP/1.1, (http/1.1)}{localhost:38299} 2024-11-07T12:29:22,133 INFO [Time-limited test {}] server.Server(415): Started @100806ms 2024-11-07T12:29:22,147 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:29:22,214 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:29:22,217 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:29:22,218 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:29:22,218 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:29:22,218 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:29:22,219 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@469dec96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:29:22,219 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@eab7acc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:29:22,344 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2152d149{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/java.io.tmpdir/jetty-localhost-38655-hadoop-hdfs-3_4_1-tests_jar-_-any-1333457454703953446/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:29:22,344 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1d281952{HTTP/1.1, (http/1.1)}{localhost:38655} 2024-11-07T12:29:22,344 INFO [Time-limited test {}] server.Server(415): Started @101017ms 2024-11-07T12:29:22,346 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:29:22,384 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:29:22,389 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:29:22,390 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:29:22,390 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:29:22,390 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:29:22,391 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@274298f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:29:22,391 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c1be80f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:29:22,433 WARN [Thread-439 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/cluster_7dc445e3-5a33-1553-d109-58efa242a05e/data/data1/current/BP-1850490054-172.17.0.3-1730982561961/current, will proceed with Du for space computation calculation, 2024-11-07T12:29:22,434 WARN [Thread-440 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/cluster_7dc445e3-5a33-1553-d109-58efa242a05e/data/data2/current/BP-1850490054-172.17.0.3-1730982561961/current, will proceed with Du for space computation calculation, 2024-11-07T12:29:22,458 WARN [Thread-418 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:29:22,462 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xab86eee673d69f18 with lease ID 0xe9222459430a189d: Processing first storage report for DS-a71dd030-a6d4-44fe-a6c9-3cbacbaa5958 from datanode DatanodeRegistration(127.0.0.1:42235, datanodeUuid=9159dc2b-cba6-4207-bbe7-94819d267dd5, infoPort=45249, infoSecurePort=0, ipcPort=41659, storageInfo=lv=-57;cid=testClusterID;nsid=590469380;c=1730982561961) 2024-11-07T12:29:22,462 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xab86eee673d69f18 with lease ID 0xe9222459430a189d: from storage DS-a71dd030-a6d4-44fe-a6c9-3cbacbaa5958 node DatanodeRegistration(127.0.0.1:42235, datanodeUuid=9159dc2b-cba6-4207-bbe7-94819d267dd5, infoPort=45249, infoSecurePort=0, ipcPort=41659, storageInfo=lv=-57;cid=testClusterID;nsid=590469380;c=1730982561961), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-07T12:29:22,462 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xab86eee673d69f18 with lease ID 0xe9222459430a189d: Processing first storage report for DS-9efcbc12-9413-46ec-b68c-b06b2e99c6bb from datanode DatanodeRegistration(127.0.0.1:42235, datanodeUuid=9159dc2b-cba6-4207-bbe7-94819d267dd5, infoPort=45249, infoSecurePort=0, ipcPort=41659, storageInfo=lv=-57;cid=testClusterID;nsid=590469380;c=1730982561961) 2024-11-07T12:29:22,462 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xab86eee673d69f18 with lease ID 0xe9222459430a189d: from storage DS-9efcbc12-9413-46ec-b68c-b06b2e99c6bb node DatanodeRegistration(127.0.0.1:42235, datanodeUuid=9159dc2b-cba6-4207-bbe7-94819d267dd5, infoPort=45249, infoSecurePort=0, ipcPort=41659, storageInfo=lv=-57;cid=testClusterID;nsid=590469380;c=1730982561961), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:29:22,513 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6838bf55{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/java.io.tmpdir/jetty-localhost-42813-hadoop-hdfs-3_4_1-tests_jar-_-any-3742593584765295921/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:29:22,513 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@254e0164{HTTP/1.1, (http/1.1)}{localhost:42813} 2024-11-07T12:29:22,513 INFO [Time-limited test {}] server.Server(415): Started @101186ms 2024-11-07T12:29:22,515 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
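[annotation] The block reports above come from the two DataNodes of the mini HDFS cluster the test harness brings up. Purely as an illustrative sketch (the run itself drives this through HBaseTestingUtil rather than directly), a comparable two-datanode cluster can be stood up with Hadoop's MiniDFSCluster test helper; the base directory and paths below are placeholders, not values from this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder test-data directory; the real run uses the Jenkins workspace path logged above.
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-sketch");

    // Two DataNodes, matching the two storage reports processed by the BlockManager above.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/user/jenkins/test-data"));
      System.out.println("NameNode up at " + fs.getUri());
    } finally {
      cluster.shutdown();
    }
  }
}
```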
2024-11-07T12:29:22,605 WARN [Thread-466 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/cluster_7dc445e3-5a33-1553-d109-58efa242a05e/data/data4/current/BP-1850490054-172.17.0.3-1730982561961/current, will proceed with Du for space computation calculation, 2024-11-07T12:29:22,605 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/cluster_7dc445e3-5a33-1553-d109-58efa242a05e/data/data3/current/BP-1850490054-172.17.0.3-1730982561961/current, will proceed with Du for space computation calculation, 2024-11-07T12:29:22,622 WARN [Thread-454 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-07T12:29:22,625 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x271391570963ceb0 with lease ID 0xe9222459430a189e: Processing first storage report for DS-931dd651-ba75-4978-a5b5-d2e61521bb45 from datanode DatanodeRegistration(127.0.0.1:40487, datanodeUuid=7488632a-7fa5-4840-b8e9-e635c297f9ab, infoPort=32905, infoSecurePort=0, ipcPort=46459, storageInfo=lv=-57;cid=testClusterID;nsid=590469380;c=1730982561961) 2024-11-07T12:29:22,626 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x271391570963ceb0 with lease ID 0xe9222459430a189e: from storage DS-931dd651-ba75-4978-a5b5-d2e61521bb45 node DatanodeRegistration(127.0.0.1:40487, datanodeUuid=7488632a-7fa5-4840-b8e9-e635c297f9ab, infoPort=32905, infoSecurePort=0, ipcPort=46459, storageInfo=lv=-57;cid=testClusterID;nsid=590469380;c=1730982561961), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:29:22,626 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x271391570963ceb0 with lease ID 0xe9222459430a189e: Processing first storage report for DS-e0327cee-486f-4665-8349-50bd6af9d87a from datanode DatanodeRegistration(127.0.0.1:40487, datanodeUuid=7488632a-7fa5-4840-b8e9-e635c297f9ab, infoPort=32905, infoSecurePort=0, ipcPort=46459, storageInfo=lv=-57;cid=testClusterID;nsid=590469380;c=1730982561961) 2024-11-07T12:29:22,626 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x271391570963ceb0 with lease ID 0xe9222459430a189e: from storage DS-e0327cee-486f-4665-8349-50bd6af9d87a node DatanodeRegistration(127.0.0.1:40487, datanodeUuid=7488632a-7fa5-4840-b8e9-e635c297f9ab, infoPort=32905, infoSecurePort=0, ipcPort=46459, storageInfo=lv=-57;cid=testClusterID;nsid=590469380;c=1730982561961), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:29:22,646 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446 2024-11-07T12:29:22,649 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/cluster_7dc445e3-5a33-1553-d109-58efa242a05e/zookeeper_0, clientPort=49934, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/cluster_7dc445e3-5a33-1553-d109-58efa242a05e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/cluster_7dc445e3-5a33-1553-d109-58efa242a05e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-07T12:29:22,650 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49934 2024-11-07T12:29:22,650 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:29:22,652 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:29:22,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42235 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:29:22,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40487 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:29:22,663 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb with version=8 2024-11-07T12:29:22,663 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/hbase-staging 2024-11-07T12:29:22,666 INFO [Time-limited test {}] client.ConnectionUtils(128): master/87fb68174c52:0 server-side Connection retries=45 2024-11-07T12:29:22,666 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:29:22,667 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:29:22,667 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:29:22,667 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:29:22,667 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:29:22,667 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-07T12:29:22,667 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:29:22,668 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:36049 2024-11-07T12:29:22,670 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36049 connecting to ZooKeeper ensemble=127.0.0.1:49934 2024-11-07T12:29:22,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:360490x0, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:29:22,676 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36049-0x1001a37d5f70000 connected 2024-11-07T12:29:22,696 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:29:22,698 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:29:22,702 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:29:22,702 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb, hbase.cluster.distributed=false 2024-11-07T12:29:22,704 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:29:22,705 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36049 2024-11-07T12:29:22,705 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36049 2024-11-07T12:29:22,705 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36049 2024-11-07T12:29:22,705 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36049 2024-11-07T12:29:22,706 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36049 2024-11-07T12:29:22,722 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/87fb68174c52:0 server-side Connection retries=45 2024-11-07T12:29:22,723 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:29:22,723 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:29:22,723 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:29:22,723 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:29:22,723 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:29:22,723 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T12:29:22,723 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:29:22,724 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40433 2024-11-07T12:29:22,725 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40433 connecting to ZooKeeper ensemble=127.0.0.1:49934 2024-11-07T12:29:22,726 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:29:22,728 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:29:22,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:404330x0, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:29:22,734 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40433-0x1001a37d5f70001, quorum=127.0.0.1:49934, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:29:22,734 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40433-0x1001a37d5f70001 connected 2024-11-07T12:29:22,734 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-07T12:29:22,736 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-07T12:29:22,737 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40433-0x1001a37d5f70001, quorum=127.0.0.1:49934, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T12:29:22,738 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40433-0x1001a37d5f70001, quorum=127.0.0.1:49934, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:29:22,740 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40433 2024-11-07T12:29:22,741 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40433 2024-11-07T12:29:22,744 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40433 2024-11-07T12:29:22,745 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40433 2024-11-07T12:29:22,746 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40433 
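[annotation] The RpcExecutor lines above show the master (port 36049) and regionserver (port 40433) each instantiating default, priority, replication and metaPriority call queues with handlerCount=3 and maxQueueLength=30. The keys below are standard HBase IPC settings; that the harness sets exactly these values (and that 30 is simply ten queued calls per handler) is an assumption for illustration, not something the log states.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcHandlerConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Handler pool size; a value of 3 would be consistent with the handlerCount=3
    // reported for the FPBQ.Fifo executors above (hypothetical value, for illustration).
    conf.setInt("hbase.regionserver.handler.count", 3);

    // How the priority.RWQ.Fifo executor splits its handlers between read and write queues.
    conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);
    conf.setFloat("hbase.ipc.server.callqueue.handler.factor", 0.1f);

    System.out.println("handlers=" + conf.getInt("hbase.regionserver.handler.count", 30));
  }
}
```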
2024-11-07T12:29:22,759 DEBUG [M:0;87fb68174c52:36049 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;87fb68174c52:36049 2024-11-07T12:29:22,759 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/87fb68174c52,36049,1730982562666 2024-11-07T12:29:22,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40433-0x1001a37d5f70001, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:29:22,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:29:22,762 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/87fb68174c52,36049,1730982562666 2024-11-07T12:29:22,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40433-0x1001a37d5f70001, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T12:29:22,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:22,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40433-0x1001a37d5f70001, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:22,765 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T12:29:22,766 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/87fb68174c52,36049,1730982562666 from backup master directory 2024-11-07T12:29:22,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/87fb68174c52,36049,1730982562666 2024-11-07T12:29:22,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40433-0x1001a37d5f70001, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:29:22,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:29:22,767 WARN [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
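[annotation] The ZKWatcher entries above are HBase's wrapper reacting to znode events under /hbase: the backup-master znode appearing and being deleted, then /hbase/master being created. As a minimal sketch using the plain Apache ZooKeeper client instead of HBase's ZKWatcher/ZKUtil, watching for the active-master znode looks roughly like this; only the quorum address (clientPort=49934) is taken from the log, everything else is illustrative.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class MasterZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch created = new CountDownLatch(1);

    Watcher watcher = new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // Mirrors the "type=NodeCreated ... path=/hbase/master" events in the log.
        if (event.getType() == Event.EventType.NodeCreated
            && "/hbase/master".equals(event.getPath())) {
          created.countDown();
        }
      }
    };

    // Quorum and port come from the MiniZooKeeperCluster entry above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:49934", 30000, watcher);
    try {
      // Sets a one-shot existence watch even when the znode does not exist yet,
      // which is what "Set watcher on znode that does not yet exist" refers to.
      zk.exists("/hbase/master", true);
      created.await();
      System.out.println("/hbase/master was created");
    } finally {
      zk.close();
    }
  }
}
```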
2024-11-07T12:29:22,767 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=87fb68174c52,36049,1730982562666 2024-11-07T12:29:22,772 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/hbase.id] with ID: a533cdc5-78fc-48f3-83ea-49614717ba1d 2024-11-07T12:29:22,773 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/.tmp/hbase.id 2024-11-07T12:29:22,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42235 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:29:22,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40487 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:29:22,781 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/.tmp/hbase.id]:[hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/hbase.id] 2024-11-07T12:29:22,796 INFO [master/87fb68174c52:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:29:22,797 INFO [master/87fb68174c52:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-07T12:29:22,798 INFO [master/87fb68174c52:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
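[annotation] The FSUtils entries above show the cluster ID being written to a temporary location and then moved to its final path. A minimal sketch of that write-to-.tmp-then-rename pattern with the plain Hadoop FileSystem API; the root directory below is a placeholder (HBase performs this internally in FSUtils), while the ID string is the one logged for this run.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder root; the run itself uses hdfs://localhost:46863/user/jenkins/test-data/...
    Path rootDir = new Path("hdfs://localhost:8020/hbase-sketch");
    FileSystem fs = rootDir.getFileSystem(conf);

    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path dst = new Path(rootDir, "hbase.id");

    // Write the ID to a temporary location first...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("a533cdc5-78fc-48f3-83ea-49614717ba1d".getBytes(StandardCharsets.UTF_8));
    }
    // ...then move it to its final location, as logged by FSUtils(634).
    if (!fs.rename(tmp, dst)) {
      throw new IOException("rename failed for " + tmp);
    }
  }
}
```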
2024-11-07T12:29:22,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40433-0x1001a37d5f70001, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:22,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:22,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40487 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:29:22,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42235 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:29:22,811 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T12:29:22,812 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-07T12:29:22,813 INFO [master/87fb68174c52:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:29:22,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40487 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:29:22,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42235 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:29:22,826 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store 2024-11-07T12:29:22,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40487 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:29:22,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42235 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:29:22,839 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:29:22,839 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:29:22,839 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:29:22,839 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:29:22,839 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:29:22,840 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:29:22,840 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
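[annotation] The master:store descriptor logged above (families info, proc, rs and state) is assembled internally by the master local-region code, not by user code. Purely as an illustration of what the logged 'info' family settings correspond to in the public descriptor API, under the assumption that nothing here reproduces the actual internal builder:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    // Mirrors the logged 'info' family: VERSIONS=3, ROW_INDEX_V1 encoding,
    // ROWCOL bloom filter, IN_MEMORY=true, 8 KB block size.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();

    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master:store"))
        .setColumnFamily(info)
        .build();

    System.out.println(td);
  }
}
```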
2024-11-07T12:29:22,840 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730982562839Disabling compacts and flushes for region at 1730982562839Disabling writes for close at 1730982562840 (+1 ms)Writing region close event to WAL at 1730982562840Closed at 1730982562840 2024-11-07T12:29:22,841 WARN [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store/.initializing 2024-11-07T12:29:22,842 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/WALs/87fb68174c52,36049,1730982562666 2024-11-07T12:29:22,846 INFO [master/87fb68174c52:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87fb68174c52%2C36049%2C1730982562666, suffix=, logDir=hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/WALs/87fb68174c52,36049,1730982562666, archiveDir=hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/oldWALs, maxLogs=10 2024-11-07T12:29:22,846 INFO [master/87fb68174c52:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C36049%2C1730982562666.1730982562846 2024-11-07T12:29:22,853 INFO [master/87fb68174c52:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/WALs/87fb68174c52,36049,1730982562666/87fb68174c52%2C36049%2C1730982562666.1730982562846 2024-11-07T12:29:22,857 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45249:45249),(127.0.0.1/127.0.0.1:32905:32905)] 2024-11-07T12:29:22,857 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:29:22,858 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:29:22,858 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:29:22,858 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:29:22,861 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:29:22,862 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-07T12:29:22,862 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:22,863 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:29:22,863 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:29:22,865 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-07T12:29:22,865 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:22,865 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:29:22,865 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:29:22,867 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-07T12:29:22,868 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:22,868 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:29:22,868 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:29:22,870 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-07T12:29:22,870 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:22,870 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:29:22,870 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:29:22,871 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:29:22,872 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:29:22,873 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:29:22,873 DEBUG [master/87fb68174c52:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:29:22,874 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-07T12:29:22,875 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:29:22,878 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:29:22,878 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=747773, jitterRate=-0.04915757477283478}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-07T12:29:22,879 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1730982562858Initializing all the Stores at 1730982562859 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982562859Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982562860 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982562860Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982562860Cleaning up temporary data from old regions at 1730982562873 (+13 ms)Region opened successfully at 1730982562879 (+6 ms) 2024-11-07T12:29:22,880 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-07T12:29:22,885 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@530b7d42, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87fb68174c52/172.17.0.3:0 2024-11-07T12:29:22,886 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-07T12:29:22,886 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-07T12:29:22,886 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-07T12:29:22,886 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-07T12:29:22,887 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-07T12:29:22,887 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-07T12:29:22,887 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-07T12:29:22,890 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-07T12:29:22,891 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-07T12:29:22,892 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-07T12:29:22,892 INFO [master/87fb68174c52:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-07T12:29:22,893 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-07T12:29:22,894 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-07T12:29:22,895 INFO [master/87fb68174c52:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-07T12:29:22,896 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-07T12:29:22,897 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-07T12:29:22,898 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-07T12:29:22,899 DEBUG 
[master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-07T12:29:22,901 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-07T12:29:22,903 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-07T12:29:22,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:29:22,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40433-0x1001a37d5f70001, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:29:22,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:22,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40433-0x1001a37d5f70001, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:22,905 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=87fb68174c52,36049,1730982562666, sessionid=0x1001a37d5f70000, setting cluster-up flag (Was=false) 2024-11-07T12:29:22,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40433-0x1001a37d5f70001, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:22,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:22,914 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-07T12:29:22,915 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87fb68174c52,36049,1730982562666 2024-11-07T12:29:22,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40433-0x1001a37d5f70001, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:22,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:22,923 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-07T12:29:22,924 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87fb68174c52,36049,1730982562666 2024-11-07T12:29:22,925 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-07T12:29:22,927 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-07T12:29:22,928 INFO [master/87fb68174c52:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-07T12:29:22,928 INFO [master/87fb68174c52:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-07T12:29:22,928 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 87fb68174c52,36049,1730982562666 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-07T12:29:22,930 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:29:22,930 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:29:22,930 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:29:22,930 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:29:22,930 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/87fb68174c52:0, corePoolSize=10, maxPoolSize=10 2024-11-07T12:29:22,930 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:22,930 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/87fb68174c52:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:29:22,930 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/87fb68174c52:0, corePoolSize=1, 
maxPoolSize=1 2024-11-07T12:29:22,931 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1730982592931 2024-11-07T12:29:22,931 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-07T12:29:22,931 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-07T12:29:22,932 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-07T12:29:22,932 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-07T12:29:22,932 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-07T12:29:22,932 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-07T12:29:22,932 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:22,932 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-07T12:29:22,932 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-07T12:29:22,932 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:29:22,933 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-07T12:29:22,933 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-07T12:29:22,933 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-07T12:29:22,933 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-07T12:29:22,934 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.large.0-1730982562933,5,FailOnTimeoutGroup] 2024-11-07T12:29:22,934 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:22,934 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-07T12:29:22,935 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.small.0-1730982562934,5,FailOnTimeoutGroup] 2024-11-07T12:29:22,935 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:22,935 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-07T12:29:22,935 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:22,935 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-07T12:29:22,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40487 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:29:22,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42235 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:29:22,943 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-07T12:29:22,943 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb 2024-11-07T12:29:22,950 INFO [RS:0;87fb68174c52:40433 {}] regionserver.HRegionServer(746): ClusterId : a533cdc5-78fc-48f3-83ea-49614717ba1d 2024-11-07T12:29:22,950 DEBUG [RS:0;87fb68174c52:40433 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-07T12:29:22,953 DEBUG [RS:0;87fb68174c52:40433 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-07T12:29:22,953 DEBUG [RS:0;87fb68174c52:40433 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-07T12:29:22,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42235 is added to blk_1073741832_1008 (size=32) 2024-11-07T12:29:22,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40487 is added to blk_1073741832_1008 (size=32) 2024-11-07T12:29:22,955 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 
2024-11-07T12:29:22,955 DEBUG [RS:0;87fb68174c52:40433 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-07T12:29:22,956 DEBUG [RS:0;87fb68174c52:40433 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f00d9b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87fb68174c52/172.17.0.3:0 2024-11-07T12:29:22,957 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:29:22,958 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:29:22,958 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:22,959 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:29:22,959 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:29:22,961 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:29:22,961 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:22,961 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:29:22,961 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:29:22,963 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:29:22,963 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:22,964 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:29:22,964 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:29:22,966 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:29:22,966 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:22,966 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:29:22,967 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:29:22,967 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/data/hbase/meta/1588230740 2024-11-07T12:29:22,968 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/data/hbase/meta/1588230740 2024-11-07T12:29:22,969 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:29:22,969 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:29:22,970 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T12:29:22,971 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:29:22,972 DEBUG [RS:0;87fb68174c52:40433 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;87fb68174c52:40433 2024-11-07T12:29:22,972 INFO [RS:0;87fb68174c52:40433 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-07T12:29:22,972 INFO [RS:0;87fb68174c52:40433 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-07T12:29:22,972 DEBUG [RS:0;87fb68174c52:40433 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-07T12:29:22,973 INFO [RS:0;87fb68174c52:40433 {}] regionserver.HRegionServer(2659): reportForDuty to master=87fb68174c52,36049,1730982562666 with port=40433, startcode=1730982562722 2024-11-07T12:29:22,974 DEBUG [RS:0;87fb68174c52:40433 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-07T12:29:22,975 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:29:22,975 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=822986, jitterRate=0.04648098349571228}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:29:22,977 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1730982562955Initializing all the Stores at 1730982562956 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982562956Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982562956Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 
'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982562956Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982562956Cleaning up temporary data from old regions at 1730982562969 (+13 ms)Region opened successfully at 1730982562977 (+8 ms) 2024-11-07T12:29:22,977 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:29:22,977 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:29:22,977 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:29:22,977 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:29:22,977 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:29:22,977 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58597, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-07T12:29:22,978 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:29:22,978 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730982562977Disabling compacts and flushes for region at 1730982562977Disabling writes for close at 1730982562977Writing region close event to WAL at 1730982562977Closed at 1730982562978 (+1 ms) 2024-11-07T12:29:22,978 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36049 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 87fb68174c52,40433,1730982562722 2024-11-07T12:29:22,978 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36049 {}] master.ServerManager(517): Registering regionserver=87fb68174c52,40433,1730982562722 2024-11-07T12:29:22,980 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:29:22,980 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-07T12:29:22,980 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-07T12:29:22,981 DEBUG [RS:0;87fb68174c52:40433 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb 2024-11-07T12:29:22,981 DEBUG [RS:0;87fb68174c52:40433 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46863 2024-11-07T12:29:22,981 DEBUG [RS:0;87fb68174c52:40433 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-07T12:29:22,982 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, 
region=1588230740, ASSIGN 2024-11-07T12:29:22,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:29:22,984 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-07T12:29:22,984 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [87fb68174c52,40433,1730982562722] 2024-11-07T12:29:22,984 DEBUG [RS:0;87fb68174c52:40433 {}] zookeeper.ZKUtil(111): regionserver:40433-0x1001a37d5f70001, quorum=127.0.0.1:49934, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/87fb68174c52,40433,1730982562722 2024-11-07T12:29:22,984 WARN [RS:0;87fb68174c52:40433 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-07T12:29:22,985 INFO [RS:0;87fb68174c52:40433 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:29:22,985 DEBUG [RS:0;87fb68174c52:40433 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/WALs/87fb68174c52,40433,1730982562722 2024-11-07T12:29:22,992 INFO [RS:0;87fb68174c52:40433 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-07T12:29:22,996 INFO [RS:0;87fb68174c52:40433 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-07T12:29:22,996 INFO [RS:0;87fb68174c52:40433 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-07T12:29:22,996 INFO [RS:0;87fb68174c52:40433 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:22,997 INFO [RS:0;87fb68174c52:40433 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-07T12:29:22,998 INFO [RS:0;87fb68174c52:40433 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-07T12:29:22,998 INFO [RS:0;87fb68174c52:40433 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-07T12:29:22,998 DEBUG [RS:0;87fb68174c52:40433 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:22,998 DEBUG [RS:0;87fb68174c52:40433 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:22,998 DEBUG [RS:0;87fb68174c52:40433 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:22,998 DEBUG [RS:0;87fb68174c52:40433 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:22,998 DEBUG [RS:0;87fb68174c52:40433 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:22,999 DEBUG [RS:0;87fb68174c52:40433 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/87fb68174c52:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:29:22,999 DEBUG [RS:0;87fb68174c52:40433 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:22,999 DEBUG [RS:0;87fb68174c52:40433 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:22,999 DEBUG [RS:0;87fb68174c52:40433 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:22,999 DEBUG [RS:0;87fb68174c52:40433 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:22,999 DEBUG [RS:0;87fb68174c52:40433 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:22,999 DEBUG [RS:0;87fb68174c52:40433 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:22,999 DEBUG [RS:0;87fb68174c52:40433 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/87fb68174c52:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:29:22,999 DEBUG [RS:0;87fb68174c52:40433 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:29:23,005 INFO [RS:0;87fb68174c52:40433 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:23,005 INFO [RS:0;87fb68174c52:40433 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:23,005 INFO [RS:0;87fb68174c52:40433 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:23,005 INFO [RS:0;87fb68174c52:40433 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-07T12:29:23,005 INFO [RS:0;87fb68174c52:40433 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:23,005 INFO [RS:0;87fb68174c52:40433 {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,40433,1730982562722-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:29:23,021 INFO [RS:0;87fb68174c52:40433 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-07T12:29:23,021 INFO [RS:0;87fb68174c52:40433 {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,40433,1730982562722-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:23,021 INFO [RS:0;87fb68174c52:40433 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:23,022 INFO [RS:0;87fb68174c52:40433 {}] regionserver.Replication(171): 87fb68174c52,40433,1730982562722 started 2024-11-07T12:29:23,038 INFO [RS:0;87fb68174c52:40433 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:23,038 INFO [RS:0;87fb68174c52:40433 {}] regionserver.HRegionServer(1482): Serving as 87fb68174c52,40433,1730982562722, RpcServer on 87fb68174c52/172.17.0.3:40433, sessionid=0x1001a37d5f70001 2024-11-07T12:29:23,038 DEBUG [RS:0;87fb68174c52:40433 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-07T12:29:23,038 DEBUG [RS:0;87fb68174c52:40433 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 87fb68174c52,40433,1730982562722 2024-11-07T12:29:23,038 DEBUG [RS:0;87fb68174c52:40433 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87fb68174c52,40433,1730982562722' 2024-11-07T12:29:23,038 DEBUG [RS:0;87fb68174c52:40433 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-07T12:29:23,039 DEBUG [RS:0;87fb68174c52:40433 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-07T12:29:23,040 DEBUG [RS:0;87fb68174c52:40433 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-07T12:29:23,040 DEBUG [RS:0;87fb68174c52:40433 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-07T12:29:23,040 DEBUG [RS:0;87fb68174c52:40433 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 87fb68174c52,40433,1730982562722 2024-11-07T12:29:23,040 DEBUG [RS:0;87fb68174c52:40433 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87fb68174c52,40433,1730982562722' 2024-11-07T12:29:23,040 DEBUG [RS:0;87fb68174c52:40433 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-07T12:29:23,041 DEBUG [RS:0;87fb68174c52:40433 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-07T12:29:23,041 DEBUG [RS:0;87fb68174c52:40433 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-07T12:29:23,041 INFO [RS:0;87fb68174c52:40433 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-07T12:29:23,041 INFO [RS:0;87fb68174c52:40433 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-07T12:29:23,134 WARN [87fb68174c52:36049 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-07T12:29:23,144 INFO [RS:0;87fb68174c52:40433 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87fb68174c52%2C40433%2C1730982562722, suffix=, logDir=hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/WALs/87fb68174c52,40433,1730982562722, archiveDir=hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/oldWALs, maxLogs=32 2024-11-07T12:29:23,146 INFO [RS:0;87fb68174c52:40433 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C40433%2C1730982562722.1730982563146 2024-11-07T12:29:23,153 INFO [RS:0;87fb68174c52:40433 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/WALs/87fb68174c52,40433,1730982562722/87fb68174c52%2C40433%2C1730982562722.1730982563146 2024-11-07T12:29:23,154 DEBUG [RS:0;87fb68174c52:40433 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45249:45249),(127.0.0.1/127.0.0.1:32905:32905)] 2024-11-07T12:29:23,384 DEBUG [87fb68174c52:36049 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-07T12:29:23,385 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=87fb68174c52,40433,1730982562722 2024-11-07T12:29:23,387 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87fb68174c52,40433,1730982562722, state=OPENING 2024-11-07T12:29:23,389 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-07T12:29:23,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:23,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40433-0x1001a37d5f70001, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:23,391 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:29:23,392 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:29:23,392 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:29:23,392 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=87fb68174c52,40433,1730982562722}] 2024-11-07T12:29:23,545 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-07T12:29:23,548 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47655, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-07T12:29:23,552 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-07T12:29:23,552 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:29:23,554 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87fb68174c52%2C40433%2C1730982562722.meta, suffix=.meta, logDir=hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/WALs/87fb68174c52,40433,1730982562722, archiveDir=hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/oldWALs, maxLogs=32 2024-11-07T12:29:23,557 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C40433%2C1730982562722.meta.1730982563556.meta 2024-11-07T12:29:23,563 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/WALs/87fb68174c52,40433,1730982562722/87fb68174c52%2C40433%2C1730982562722.meta.1730982563556.meta 2024-11-07T12:29:23,564 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32905:32905),(127.0.0.1/127.0.0.1:45249:45249)] 2024-11-07T12:29:23,564 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:29:23,565 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-07T12:29:23,565 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-07T12:29:23,565 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-07T12:29:23,565 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-07T12:29:23,565 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:29:23,565 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-07T12:29:23,565 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-07T12:29:23,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:29:23,568 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:29:23,568 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:23,569 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:29:23,569 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:29:23,570 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:29:23,570 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:23,571 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:29:23,571 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:29:23,572 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:29:23,572 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:23,572 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:29:23,573 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:29:23,574 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:29:23,574 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:23,574 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-07T12:29:23,574 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:29:23,575 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/data/hbase/meta/1588230740 2024-11-07T12:29:23,577 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/data/hbase/meta/1588230740 2024-11-07T12:29:23,578 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:29:23,578 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:29:23,579 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T12:29:23,581 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:29:23,581 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=847832, jitterRate=0.0780746191740036}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:29:23,582 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-07T12:29:23,583 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1730982563565Writing region info on filesystem at 1730982563565Initializing all the Stores at 1730982563567 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982563567Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982563567Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982563567Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982563567Cleaning up temporary data from old regions at 1730982563578 (+11 ms)Running coprocessor post-open hooks at 1730982563582 (+4 ms)Region opened successfully at 1730982563582 2024-11-07T12:29:23,584 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1730982563545 2024-11-07T12:29:23,587 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-07T12:29:23,587 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-07T12:29:23,588 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=87fb68174c52,40433,1730982562722 2024-11-07T12:29:23,589 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87fb68174c52,40433,1730982562722, state=OPEN 2024-11-07T12:29:23,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:29:23,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40433-0x1001a37d5f70001, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:29:23,594 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=87fb68174c52,40433,1730982562722 2024-11-07T12:29:23,594 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:29:23,594 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:29:23,597 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-07T12:29:23,597 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=87fb68174c52,40433,1730982562722 in 202 msec 2024-11-07T12:29:23,600 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-07T12:29:23,600 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 617 msec 2024-11-07T12:29:23,601 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:29:23,601 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-07T12:29:23,603 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:29:23,603 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87fb68174c52,40433,1730982562722, seqNum=-1] 2024-11-07T12:29:23,603 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:29:23,605 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55045, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:29:23,611 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 683 msec 2024-11-07T12:29:23,611 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1730982563611, completionTime=-1 2024-11-07T12:29:23,611 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-07T12:29:23,611 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-07T12:29:23,613 INFO [master/87fb68174c52:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-07T12:29:23,613 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1730982623613 2024-11-07T12:29:23,613 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1730982683613 2024-11-07T12:29:23,613 INFO [master/87fb68174c52:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-07T12:29:23,614 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,36049,1730982562666-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:23,614 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,36049,1730982562666-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:23,614 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,36049,1730982562666-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:23,614 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-87fb68174c52:36049, period=300000, unit=MILLISECONDS is enabled. 
2024-11-07T12:29:23,614 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:23,614 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:23,616 DEBUG [master/87fb68174c52:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-07T12:29:23,619 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.852sec 2024-11-07T12:29:23,619 INFO [master/87fb68174c52:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-07T12:29:23,619 INFO [master/87fb68174c52:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-07T12:29:23,619 INFO [master/87fb68174c52:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-07T12:29:23,619 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-07T12:29:23,619 INFO [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-07T12:29:23,619 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,36049,1730982562666-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:29:23,619 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,36049,1730982562666-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-07T12:29:23,622 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-07T12:29:23,622 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-07T12:29:23,622 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,36049,1730982562666-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
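The "Chore ScheduledChore name=..., period=..., unit=... is enabled." lines above are emitted as the master registers its periodic chores (balancer, catalog janitor, HBCK, MOB cleanup, and so on) with its ChoreService. A minimal sketch of how such a chore is declared and scheduled follows; the chore name, the 60_000 ms period, and the exact ScheduledChore constructor overload are assumptions, not details taken from this log.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

class ExampleChore extends ScheduledChore {
  ExampleChore(Stoppable stopper) {
    super("ExampleChore", stopper, 60_000); // chore name, owning Stoppable, period in milliseconds
  }

  @Override
  protected void chore() {
    // periodic work; keeps running on the ChoreService until the stopper reports stopped
  }
}

class ExampleChoreRegistration {
  static void register(ChoreService choreService, Stoppable stopper) {
    // Scheduling is what produces the "... is enabled." log lines seen above.
    choreService.scheduleChore(new ExampleChore(stopper));
  }
}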
2024-11-07T12:29:23,650 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ab35378, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:29:23,650 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 87fb68174c52,36049,-1 for getting cluster id 2024-11-07T12:29:23,650 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-07T12:29:23,652 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a533cdc5-78fc-48f3-83ea-49614717ba1d' 2024-11-07T12:29:23,653 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-07T12:29:23,653 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a533cdc5-78fc-48f3-83ea-49614717ba1d" 2024-11-07T12:29:23,654 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@367dc70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:29:23,654 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [87fb68174c52,36049,-1] 2024-11-07T12:29:23,654 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-07T12:29:23,654 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:29:23,656 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54316, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-07T12:29:23,657 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fa3cb60, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:29:23,657 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:29:23,659 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87fb68174c52,40433,1730982562722, seqNum=-1] 2024-11-07T12:29:23,659 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:29:23,661 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38042, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:29:23,666 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=87fb68174c52,36049,1730982562666 2024-11-07T12:29:23,667 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:29:23,670 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-07T12:29:23,670 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-07T12:29:23,670 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-07T12:29:23,670 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:29:23,670 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:29:23,670 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:29:23,671 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-07T12:29:23,671 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-07T12:29:23,671 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2029051909, stopped=false 2024-11-07T12:29:23,671 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=87fb68174c52,36049,1730982562666 2024-11-07T12:29:23,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40433-0x1001a37d5f70001, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:29:23,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:29:23,673 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:29:23,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:23,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40433-0x1001a37d5f70001, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:23,673 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-07T12:29:23,674 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:29:23,674 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:29:23,674 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:29:23,674 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40433-0x1001a37d5f70001, quorum=127.0.0.1:49934, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:29:23,674 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '87fb68174c52,40433,1730982562722' ***** 2024-11-07T12:29:23,674 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-07T12:29:23,674 INFO [RS:0;87fb68174c52:40433 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-07T12:29:23,675 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-07T12:29:23,675 INFO [RS:0;87fb68174c52:40433 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-07T12:29:23,675 INFO [RS:0;87fb68174c52:40433 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-07T12:29:23,675 INFO [RS:0;87fb68174c52:40433 {}] regionserver.HRegionServer(959): stopping server 87fb68174c52,40433,1730982562722 2024-11-07T12:29:23,675 INFO [RS:0;87fb68174c52:40433 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:29:23,675 INFO [RS:0;87fb68174c52:40433 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;87fb68174c52:40433. 2024-11-07T12:29:23,675 DEBUG [RS:0;87fb68174c52:40433 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:29:23,675 DEBUG [RS:0;87fb68174c52:40433 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:29:23,675 INFO [RS:0;87fb68174c52:40433 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
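Two of the three close() call stacks above come from the test thread's teardown: both bottom out in AbstractTestLogRolling.tearDown() calling HBaseTestingUtil.shutdownMiniCluster(), which closes the shared async connection and produces the stacks that AsyncConnectionImpl logs. A minimal sketch of that teardown shape follows; only the tearDown() -> shutdownMiniCluster() chain is confirmed by the stack traces, while the TEST_UTIL field name and how the utility is constructed are assumptions.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public abstract class LogRollingTearDownSketch {
  // Assumed field; the real AbstractTestLogRolling holds its testing utility elsewhere.
  protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Matches the logged path: shutdownMiniCluster() -> shutdownMiniHBaseCluster()
    // -> cleanup()/closeConnection(), which triggers the AsyncConnectionImpl close() stacks above.
    TEST_UTIL.shutdownMiniCluster();
  }
}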
2024-11-07T12:29:23,675 INFO [RS:0;87fb68174c52:40433 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-07T12:29:23,675 INFO [RS:0;87fb68174c52:40433 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-07T12:29:23,675 INFO [RS:0;87fb68174c52:40433 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-07T12:29:23,676 INFO [RS:0;87fb68174c52:40433 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-07T12:29:23,676 DEBUG [RS:0;87fb68174c52:40433 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-07T12:29:23,676 DEBUG [RS:0;87fb68174c52:40433 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-07T12:29:23,676 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:29:23,676 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:29:23,676 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:29:23,676 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:29:23,676 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:29:23,677 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-07T12:29:23,696 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/data/hbase/meta/1588230740/.tmp/ns/e26bdb45983b47c188b971e5d607015c is 43, key is default/ns:d/1730982563605/Put/seqid=0 2024-11-07T12:29:23,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40487 is added to blk_1073741835_1011 (size=5153) 2024-11-07T12:29:23,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42235 is added to blk_1073741835_1011 (size=5153) 2024-11-07T12:29:23,703 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/data/hbase/meta/1588230740/.tmp/ns/e26bdb45983b47c188b971e5d607015c 2024-11-07T12:29:23,711 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/data/hbase/meta/1588230740/.tmp/ns/e26bdb45983b47c188b971e5d607015c as hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/data/hbase/meta/1588230740/ns/e26bdb45983b47c188b971e5d607015c 2024-11-07T12:29:23,718 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/data/hbase/meta/1588230740/ns/e26bdb45983b47c188b971e5d607015c, entries=2, sequenceid=6, filesize=5.0 K 2024-11-07T12:29:23,720 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 43ms, sequenceid=6, compaction requested=false 2024-11-07T12:29:23,724 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-07T12:29:23,725 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T12:29:23,725 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:29:23,725 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730982563676Running coprocessor pre-close hooks at 1730982563676Disabling compacts and flushes for region at 1730982563676Disabling writes for close at 1730982563676Obtaining lock to block concurrent updates at 1730982563677 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1730982563677Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1730982563677Flushing stores of hbase:meta,,1.1588230740 at 1730982563678 (+1 ms)Flushing 1588230740/ns: creating writer at 1730982563678Flushing 1588230740/ns: appending metadata at 1730982563695 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1730982563695Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@535a2094: reopening flushed file at 1730982563710 (+15 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 43ms, sequenceid=6, compaction requested=false at 1730982563720 (+10 ms)Writing region close event to WAL at 1730982563720Running coprocessor post-close hooks at 1730982563725 (+5 ms)Closed at 1730982563725 2024-11-07T12:29:23,726 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-07T12:29:23,876 INFO [RS:0;87fb68174c52:40433 {}] regionserver.HRegionServer(976): stopping server 87fb68174c52,40433,1730982562722; all regions closed. 
2024-11-07T12:29:23,877 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:23,877 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:23,877 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:23,877 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:23,877 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:23,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42235 is added to blk_1073741834_1010 (size=1152) 2024-11-07T12:29:23,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40487 is added to blk_1073741834_1010 (size=1152) 2024-11-07T12:29:23,883 DEBUG [RS:0;87fb68174c52:40433 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/oldWALs 2024-11-07T12:29:23,883 INFO [RS:0;87fb68174c52:40433 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87fb68174c52%2C40433%2C1730982562722.meta:.meta(num 1730982563556) 2024-11-07T12:29:23,883 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:23,883 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:23,883 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:23,883 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:23,883 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:23,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40487 is added to blk_1073741833_1009 (size=93) 2024-11-07T12:29:23,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42235 is added to blk_1073741833_1009 (size=93) 2024-11-07T12:29:23,888 DEBUG [RS:0;87fb68174c52:40433 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/oldWALs 2024-11-07T12:29:23,888 INFO [RS:0;87fb68174c52:40433 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87fb68174c52%2C40433%2C1730982562722:(num 1730982563146) 2024-11-07T12:29:23,888 DEBUG [RS:0;87fb68174c52:40433 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:29:23,888 INFO [RS:0;87fb68174c52:40433 {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:29:23,888 INFO [RS:0;87fb68174c52:40433 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:29:23,888 INFO [RS:0;87fb68174c52:40433 {}] hbase.ChoreService(370): Chore service for: regionserver/87fb68174c52:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-07T12:29:23,889 INFO [RS:0;87fb68174c52:40433 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:29:23,889 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-07T12:29:23,889 INFO [RS:0;87fb68174c52:40433 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40433 2024-11-07T12:29:23,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40433-0x1001a37d5f70001, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/87fb68174c52,40433,1730982562722 2024-11-07T12:29:23,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:29:23,891 INFO [RS:0;87fb68174c52:40433 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:29:23,893 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [87fb68174c52,40433,1730982562722] 2024-11-07T12:29:23,894 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/87fb68174c52,40433,1730982562722 already deleted, retry=false 2024-11-07T12:29:23,894 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 87fb68174c52,40433,1730982562722 expired; onlineServers=0 2024-11-07T12:29:23,894 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '87fb68174c52,36049,1730982562666' ***** 2024-11-07T12:29:23,894 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-07T12:29:23,895 INFO [M:0;87fb68174c52:36049 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:29:23,895 INFO [M:0;87fb68174c52:36049 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:29:23,895 DEBUG [M:0;87fb68174c52:36049 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-07T12:29:23,895 DEBUG [M:0;87fb68174c52:36049 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-07T12:29:23,895 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-07T12:29:23,895 DEBUG [master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.large.0-1730982562933 {}] cleaner.HFileCleaner(306): Exit Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.large.0-1730982562933,5,FailOnTimeoutGroup] 2024-11-07T12:29:23,895 DEBUG [master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.small.0-1730982562934 {}] cleaner.HFileCleaner(306): Exit Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.small.0-1730982562934,5,FailOnTimeoutGroup] 2024-11-07T12:29:23,895 INFO [M:0;87fb68174c52:36049 {}] hbase.ChoreService(370): Chore service for: master/87fb68174c52:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-07T12:29:23,895 INFO [M:0;87fb68174c52:36049 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:29:23,895 DEBUG [M:0;87fb68174c52:36049 {}] master.HMaster(1795): Stopping service threads 2024-11-07T12:29:23,895 INFO [M:0;87fb68174c52:36049 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-07T12:29:23,895 INFO [M:0;87fb68174c52:36049 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:29:23,896 INFO [M:0;87fb68174c52:36049 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-07T12:29:23,896 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-07T12:29:23,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-07T12:29:23,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:23,897 DEBUG [M:0;87fb68174c52:36049 {}] zookeeper.ZKUtil(347): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-07T12:29:23,897 WARN [M:0;87fb68174c52:36049 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-07T12:29:23,898 INFO [M:0;87fb68174c52:36049 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/.lastflushedseqids 2024-11-07T12:29:23,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40487 is added to blk_1073741836_1012 (size=99) 2024-11-07T12:29:23,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42235 is added to blk_1073741836_1012 (size=99) 2024-11-07T12:29:23,904 INFO [M:0;87fb68174c52:36049 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-07T12:29:23,904 INFO [M:0;87fb68174c52:36049 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-07T12:29:23,904 DEBUG [M:0;87fb68174c52:36049 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:29:23,904 INFO [M:0;87fb68174c52:36049 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:29:23,904 DEBUG [M:0;87fb68174c52:36049 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:29:23,904 DEBUG [M:0;87fb68174c52:36049 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:29:23,904 DEBUG [M:0;87fb68174c52:36049 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:29:23,905 INFO [M:0;87fb68174c52:36049 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-07T12:29:23,922 DEBUG [M:0;87fb68174c52:36049 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/152f860f36284b268b4689866206fb70 is 82, key is hbase:meta,,1/info:regioninfo/1730982563588/Put/seqid=0 2024-11-07T12:29:23,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40487 is added to blk_1073741837_1013 (size=5672) 2024-11-07T12:29:23,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42235 is added to blk_1073741837_1013 (size=5672) 2024-11-07T12:29:23,928 INFO [M:0;87fb68174c52:36049 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/152f860f36284b268b4689866206fb70 2024-11-07T12:29:23,950 DEBUG [M:0;87fb68174c52:36049 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ee7f36f35c7543ef9a28218faa2597a7 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1730982563610/Put/seqid=0 2024-11-07T12:29:23,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40487 is added to blk_1073741838_1014 (size=5275) 2024-11-07T12:29:23,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42235 is added to blk_1073741838_1014 (size=5275) 2024-11-07T12:29:23,956 INFO [M:0;87fb68174c52:36049 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ee7f36f35c7543ef9a28218faa2597a7 2024-11-07T12:29:23,977 DEBUG [M:0;87fb68174c52:36049 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c56b3f85e62142e7bbb4a7a82f1ece6f is 69, key is 87fb68174c52,40433,1730982562722/rs:state/1730982562979/Put/seqid=0 2024-11-07T12:29:23,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42235 is added to blk_1073741839_1015 (size=5156) 2024-11-07T12:29:23,983 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40487 is added to blk_1073741839_1015 (size=5156) 2024-11-07T12:29:23,985 INFO [M:0;87fb68174c52:36049 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c56b3f85e62142e7bbb4a7a82f1ece6f 2024-11-07T12:29:23,993 INFO [RS:0;87fb68174c52:40433 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:29:23,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40433-0x1001a37d5f70001, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:29:23,993 INFO [RS:0;87fb68174c52:40433 {}] regionserver.HRegionServer(1031): Exiting; stopping=87fb68174c52,40433,1730982562722; zookeeper connection closed. 2024-11-07T12:29:23,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40433-0x1001a37d5f70001, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:29:23,994 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7cf21c7f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7cf21c7f 2024-11-07T12:29:23,994 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-07T12:29:24,007 DEBUG [M:0;87fb68174c52:36049 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b39849d82e64410082018dd61f6c8a29 is 52, key is load_balancer_on/state:d/1730982563669/Put/seqid=0 2024-11-07T12:29:24,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42235 is added to blk_1073741840_1016 (size=5056) 2024-11-07T12:29:24,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40487 is added to blk_1073741840_1016 (size=5056) 2024-11-07T12:29:24,013 INFO [M:0;87fb68174c52:36049 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b39849d82e64410082018dd61f6c8a29 2024-11-07T12:29:24,021 DEBUG [M:0;87fb68174c52:36049 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/152f860f36284b268b4689866206fb70 as hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/152f860f36284b268b4689866206fb70 2024-11-07T12:29:24,027 INFO [M:0;87fb68174c52:36049 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/152f860f36284b268b4689866206fb70, entries=8, sequenceid=29, filesize=5.5 K 2024-11-07T12:29:24,029 DEBUG [M:0;87fb68174c52:36049 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ee7f36f35c7543ef9a28218faa2597a7 as hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ee7f36f35c7543ef9a28218faa2597a7 2024-11-07T12:29:24,035 INFO [M:0;87fb68174c52:36049 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ee7f36f35c7543ef9a28218faa2597a7, entries=3, sequenceid=29, filesize=5.2 K 2024-11-07T12:29:24,036 DEBUG [M:0;87fb68174c52:36049 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c56b3f85e62142e7bbb4a7a82f1ece6f as hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c56b3f85e62142e7bbb4a7a82f1ece6f 2024-11-07T12:29:24,042 INFO [M:0;87fb68174c52:36049 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c56b3f85e62142e7bbb4a7a82f1ece6f, entries=1, sequenceid=29, filesize=5.0 K 2024-11-07T12:29:24,044 DEBUG [M:0;87fb68174c52:36049 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b39849d82e64410082018dd61f6c8a29 as hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b39849d82e64410082018dd61f6c8a29 2024-11-07T12:29:24,049 INFO [M:0;87fb68174c52:36049 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46863/user/jenkins/test-data/a4065e8e-34bb-61de-e424-b7ea19753ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b39849d82e64410082018dd61f6c8a29, entries=1, sequenceid=29, filesize=4.9 K 2024-11-07T12:29:24,051 INFO [M:0;87fb68174c52:36049 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 147ms, sequenceid=29, compaction requested=false 2024-11-07T12:29:24,052 INFO [M:0;87fb68174c52:36049 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:29:24,053 DEBUG [M:0;87fb68174c52:36049 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730982563904Disabling compacts and flushes for region at 1730982563904Disabling writes for close at 1730982563904Obtaining lock to block concurrent updates at 1730982563905 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1730982563905Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1730982563905Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1730982563906 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1730982563906Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1730982563922 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1730982563922Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1730982563934 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1730982563949 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1730982563950 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1730982563961 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1730982563976 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1730982563977 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1730982563991 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1730982564007 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1730982564007Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5dfff8b4: reopening flushed file at 1730982564020 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6871ffbb: reopening flushed file at 1730982564028 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@700928c1: reopening flushed file at 1730982564035 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@8403f72: reopening flushed file at 1730982564043 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 147ms, sequenceid=29, compaction requested=false at 1730982564051 (+8 ms)Writing region close event to WAL at 1730982564052 (+1 ms)Closed at 1730982564052 2024-11-07T12:29:24,053 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:24,053 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:24,053 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:24,053 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:24,054 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:24,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40487 is added to blk_1073741830_1006 (size=10311) 2024-11-07T12:29:24,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42235 is added to blk_1073741830_1006 (size=10311) 2024-11-07T12:29:24,057 INFO [M:0;87fb68174c52:36049 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-07T12:29:24,057 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-07T12:29:24,057 INFO [M:0;87fb68174c52:36049 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:36049 2024-11-07T12:29:24,057 INFO [M:0;87fb68174c52:36049 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:29:24,159 INFO [M:0;87fb68174c52:36049 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:29:24,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:29:24,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36049-0x1001a37d5f70000, quorum=127.0.0.1:49934, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:29:24,164 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6838bf55{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:29:24,164 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@254e0164{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:29:24,164 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:29:24,164 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c1be80f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:29:24,164 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@274298f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/hadoop.log.dir/,STOPPED} 2024-11-07T12:29:24,166 WARN [BP-1850490054-172.17.0.3-1730982561961 heartbeating to localhost/127.0.0.1:46863 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:29:24,166 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-07T12:29:24,166 WARN [BP-1850490054-172.17.0.3-1730982561961 heartbeating to localhost/127.0.0.1:46863 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1850490054-172.17.0.3-1730982561961 (Datanode Uuid 7488632a-7fa5-4840-b8e9-e635c297f9ab) service to localhost/127.0.0.1:46863 2024-11-07T12:29:24,166 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:29:24,166 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/cluster_7dc445e3-5a33-1553-d109-58efa242a05e/data/data3/current/BP-1850490054-172.17.0.3-1730982561961 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:29:24,167 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/cluster_7dc445e3-5a33-1553-d109-58efa242a05e/data/data4/current/BP-1850490054-172.17.0.3-1730982561961 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:29:24,167 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:29:24,171 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2152d149{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:29:24,172 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1d281952{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:29:24,172 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:29:24,172 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@eab7acc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:29:24,173 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@469dec96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/hadoop.log.dir/,STOPPED} 2024-11-07T12:29:24,174 WARN [BP-1850490054-172.17.0.3-1730982561961 heartbeating to localhost/127.0.0.1:46863 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:29:24,174 WARN [BP-1850490054-172.17.0.3-1730982561961 heartbeating to localhost/127.0.0.1:46863 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1850490054-172.17.0.3-1730982561961 (Datanode Uuid 9159dc2b-cba6-4207-bbe7-94819d267dd5) service to localhost/127.0.0.1:46863 2024-11-07T12:29:24,174 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
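Once the datanodes and the ZooKeeper minicluster are stopped, the entries that follow show HBaseTestingUtil immediately starting a fresh minicluster with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}. The sketch below illustrates that stop-then-restart pattern; only shutdownMiniCluster(), the "Starting up minicluster with option" path, and the StartMiniClusterOption class name appear in the log, so the builder method names and the startMiniCluster(option) overload are assumptions inferred from the option fields printed below.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class RestartMiniClusterSketch {
  public static void restart(HBaseTestingUtil util) throws Exception {
    util.shutdownMiniCluster(); // "Shutting down minicluster" ... "Minicluster is down"
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)          // values mirror the option dump in the log that follows
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option); // "Starting up minicluster with option: StartMiniClusterOption{...}"
  }
}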
2024-11-07T12:29:24,174 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:29:24,175 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/cluster_7dc445e3-5a33-1553-d109-58efa242a05e/data/data1/current/BP-1850490054-172.17.0.3-1730982561961 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:29:24,175 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/cluster_7dc445e3-5a33-1553-d109-58efa242a05e/data/data2/current/BP-1850490054-172.17.0.3-1730982561961 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:29:24,175 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:29:24,181 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3b8ef2ff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:29:24,182 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@10885b70{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:29:24,182 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:29:24,182 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f681677{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:29:24,183 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59505eb5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/hadoop.log.dir/,STOPPED} 2024-11-07T12:29:24,189 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-07T12:29:24,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-07T12:29:24,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-07T12:29:24,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/hadoop.log.dir so I do NOT create it in target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79 2024-11-07T12:29:24,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f76cec5-ba6e-f84d-3d65-590c23292446/hadoop.tmp.dir so I do NOT create it in 
target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79 2024-11-07T12:29:24,210 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc, deleteOnExit=true 2024-11-07T12:29:24,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-07T12:29:24,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/test.cache.data in system properties and HBase conf 2024-11-07T12:29:24,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/hadoop.tmp.dir in system properties and HBase conf 2024-11-07T12:29:24,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/hadoop.log.dir in system properties and HBase conf 2024-11-07T12:29:24,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-07T12:29:24,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-07T12:29:24,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-07T12:29:24,211 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-07T12:29:24,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:29:24,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:29:24,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-07T12:29:24,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:29:24,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-07T12:29:24,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-07T12:29:24,212 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:29:24,212 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:29:24,212 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-07T12:29:24,212 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/nfs.dump.dir in system properties and HBase conf 2024-11-07T12:29:24,212 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/java.io.tmpdir in system properties and HBase conf 2024-11-07T12:29:24,212 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:29:24,212 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-07T12:29:24,212 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-07T12:29:24,226 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:29:24,301 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:29:24,308 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:29:24,310 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:29:24,310 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:29:24,310 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:29:24,311 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:29:24,311 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@42b52d44{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:29:24,312 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2305029e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:29:24,429 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62b96b7c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/java.io.tmpdir/jetty-localhost-34591-hadoop-hdfs-3_4_1-tests_jar-_-any-5725726631500244352/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:29:24,430 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2fd186ec{HTTP/1.1, (http/1.1)}{localhost:34591} 2024-11-07T12:29:24,430 INFO [Time-limited test {}] server.Server(415): Started @103103ms 2024-11-07T12:29:24,445 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:29:24,514 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:29:24,518 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:29:24,518 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:29:24,519 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:29:24,519 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:29:24,519 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a107105{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:29:24,520 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@48743db4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:29:24,635 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@14d09ab9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/java.io.tmpdir/jetty-localhost-42337-hadoop-hdfs-3_4_1-tests_jar-_-any-5222754214793166033/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:29:24,635 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@ee6b493{HTTP/1.1, (http/1.1)}{localhost:42337} 2024-11-07T12:29:24,635 INFO [Time-limited test {}] server.Server(415): Started @103308ms 2024-11-07T12:29:24,638 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:29:24,672 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:29:24,676 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:29:24,677 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:29:24,677 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:29:24,677 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:29:24,679 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7eee535{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:29:24,680 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ec1a06e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:29:24,733 WARN [Thread-659 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data2/current/BP-1009609649-172.17.0.3-1730982564245/current, will proceed with Du for space computation calculation, 2024-11-07T12:29:24,733 WARN [Thread-658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data1/current/BP-1009609649-172.17.0.3-1730982564245/current, will proceed with Du for space computation calculation, 2024-11-07T12:29:24,750 WARN [Thread-637 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:29:24,753 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x66788e3207436a54 with lease ID 0x9849c9739aa860bc: Processing first storage report for DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee from datanode DatanodeRegistration(127.0.0.1:36671, datanodeUuid=b39723c6-9828-464f-9004-aeee40a8d52a, infoPort=40095, infoSecurePort=0, ipcPort=40541, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245) 2024-11-07T12:29:24,753 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x66788e3207436a54 with lease ID 0x9849c9739aa860bc: from storage DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee node DatanodeRegistration(127.0.0.1:36671, datanodeUuid=b39723c6-9828-464f-9004-aeee40a8d52a, infoPort=40095, infoSecurePort=0, ipcPort=40541, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:29:24,754 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x66788e3207436a54 with lease ID 0x9849c9739aa860bc: Processing first storage report for DS-04a3c2ff-ab49-4ca9-8c4f-38193fcf41db from datanode DatanodeRegistration(127.0.0.1:36671, datanodeUuid=b39723c6-9828-464f-9004-aeee40a8d52a, infoPort=40095, infoSecurePort=0, ipcPort=40541, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245) 2024-11-07T12:29:24,754 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x66788e3207436a54 with lease ID 0x9849c9739aa860bc: from storage DS-04a3c2ff-ab49-4ca9-8c4f-38193fcf41db node DatanodeRegistration(127.0.0.1:36671, datanodeUuid=b39723c6-9828-464f-9004-aeee40a8d52a, infoPort=40095, infoSecurePort=0, ipcPort=40541, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:29:24,795 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7330fb3f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/java.io.tmpdir/jetty-localhost-36347-hadoop-hdfs-3_4_1-tests_jar-_-any-6102641402373007328/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:29:24,795 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20aa2ea7{HTTP/1.1, (http/1.1)}{localhost:36347} 2024-11-07T12:29:24,796 INFO [Time-limited test {}] server.Server(415): Started @103468ms 2024-11-07T12:29:24,797 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
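The HBaseTestingUtil entries above show one mini cluster being torn down and a new one started with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, createRootDir=false, createWALDir=false}, after which the embedded HDFS NameNode and DataNode web servers come up. A minimal test driver that would produce a similar startup/shutdown sequence is sketched below; the class and builder-method names are taken from the log and the HBase test utilities, but the exact signatures should be treated as assumptions for other HBase versions.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirrors the options printed by HBaseTestingUtil(805) above.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .createRootDir(false)
        .createWALDir(false)
        .build();
    util.startMiniCluster(option);    // brings up DFS, ZooKeeper, master and region server
    try {
      // test logic would go here, e.g. against util.getConnection() (assumed accessor)
    } finally {
      util.shutdownMiniCluster();     // emits the "Minicluster is down" entry seen above
    }
  }
}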
2024-11-07T12:29:24,884 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data3/current/BP-1009609649-172.17.0.3-1730982564245/current, will proceed with Du for space computation calculation, 2024-11-07T12:29:24,884 WARN [Thread-685 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data4/current/BP-1009609649-172.17.0.3-1730982564245/current, will proceed with Du for space computation calculation, 2024-11-07T12:29:24,907 WARN [Thread-673 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-07T12:29:24,910 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x897da61c57ee40bf with lease ID 0x9849c9739aa860bd: Processing first storage report for DS-c263f208-3cd5-400f-be78-6d95bfd6cf86 from datanode DatanodeRegistration(127.0.0.1:41511, datanodeUuid=e50dee72-73bb-452a-a5d4-1fb4c3bf8057, infoPort=46487, infoSecurePort=0, ipcPort=34973, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245) 2024-11-07T12:29:24,910 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x897da61c57ee40bf with lease ID 0x9849c9739aa860bd: from storage DS-c263f208-3cd5-400f-be78-6d95bfd6cf86 node DatanodeRegistration(127.0.0.1:41511, datanodeUuid=e50dee72-73bb-452a-a5d4-1fb4c3bf8057, infoPort=46487, infoSecurePort=0, ipcPort=34973, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:29:24,910 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x897da61c57ee40bf with lease ID 0x9849c9739aa860bd: Processing first storage report for DS-094b765d-ef82-45b9-ab89-493e766bd3c9 from datanode DatanodeRegistration(127.0.0.1:41511, datanodeUuid=e50dee72-73bb-452a-a5d4-1fb4c3bf8057, infoPort=46487, infoSecurePort=0, ipcPort=34973, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245) 2024-11-07T12:29:24,910 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x897da61c57ee40bf with lease ID 0x9849c9739aa860bd: from storage DS-094b765d-ef82-45b9-ab89-493e766bd3c9 node DatanodeRegistration(127.0.0.1:41511, datanodeUuid=e50dee72-73bb-452a-a5d4-1fb4c3bf8057, infoPort=46487, infoSecurePort=0, ipcPort=34973, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:29:24,925 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79 2024-11-07T12:29:24,927 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/zookeeper_0, clientPort=62457, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-07T12:29:24,928 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62457 2024-11-07T12:29:24,929 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:29:24,930 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:29:24,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41511 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:29:24,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36671 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:29:24,942 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c with version=8 2024-11-07T12:29:24,942 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/hbase-staging 2024-11-07T12:29:24,944 INFO [Time-limited test {}] client.ConnectionUtils(128): master/87fb68174c52:0 server-side Connection retries=45 2024-11-07T12:29:24,944 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:29:24,944 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:29:24,944 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:29:24,944 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:29:24,944 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:29:24,945 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-07T12:29:24,945 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:29:24,945 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41297 2024-11-07T12:29:24,947 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41297 connecting to ZooKeeper ensemble=127.0.0.1:62457 2024-11-07T12:29:24,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:412970x0, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:29:24,954 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41297-0x1001a37dedd0000 connected 2024-11-07T12:29:24,971 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:29:24,973 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:29:24,975 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:29:24,975 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c, hbase.cluster.distributed=false 2024-11-07T12:29:24,977 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:29:24,977 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41297 2024-11-07T12:29:24,978 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41297 2024-11-07T12:29:24,978 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41297 2024-11-07T12:29:24,978 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41297 2024-11-07T12:29:24,978 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41297 2024-11-07T12:29:24,995 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/87fb68174c52:0 server-side Connection retries=45 2024-11-07T12:29:24,995 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:29:24,995 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:29:24,995 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:29:24,995 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:29:24,995 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:29:24,995 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T12:29:24,995 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:29:24,996 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:45061 2024-11-07T12:29:24,997 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45061 connecting to ZooKeeper ensemble=127.0.0.1:62457 2024-11-07T12:29:24,998 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:29:25,000 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:29:25,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:450610x0, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:29:25,005 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:450610x0, quorum=127.0.0.1:62457, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:29:25,005 INFO [regionserver/87fb68174c52:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:29:25,005 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45061-0x1001a37dedd0001 connected 2024-11-07T12:29:25,006 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-07T12:29:25,006 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-07T12:29:25,007 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45061-0x1001a37dedd0001, quorum=127.0.0.1:62457, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T12:29:25,008 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45061-0x1001a37dedd0001, quorum=127.0.0.1:62457, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:29:25,008 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45061 2024-11-07T12:29:25,012 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45061 2024-11-07T12:29:25,013 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45061 2024-11-07T12:29:25,013 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45061 2024-11-07T12:29:25,015 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45061 2024-11-07T12:29:25,027 DEBUG [M:0;87fb68174c52:41297 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;87fb68174c52:41297 2024-11-07T12:29:25,027 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/87fb68174c52,41297,1730982564944 2024-11-07T12:29:25,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1001a37dedd0001, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:29:25,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:29:25,030 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/87fb68174c52,41297,1730982564944 2024-11-07T12:29:25,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1001a37dedd0001, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T12:29:25,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:25,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1001a37dedd0001, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:25,032 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T12:29:25,033 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/87fb68174c52,41297,1730982564944 from backup master directory 2024-11-07T12:29:25,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/87fb68174c52,41297,1730982564944 2024-11-07T12:29:25,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1001a37dedd0001, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:29:25,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:29:25,034 WARN [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
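The ZKWatcher/ZKUtil entries above show the master (port 41297) and region server (port 45061) connecting to the mini ZooKeeper ensemble at 127.0.0.1:62457 and setting watches on znodes such as /hbase/running, /hbase/acl and /hbase/master before those znodes exist, so each process is notified the moment the node is created. HBase does this through its own RecoverableZooKeeper/ZKUtil wrappers; the sketch below uses the plain Apache ZooKeeper client to illustrate the same watch-before-create pattern and is illustrative only, not HBase's internal code.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class WatchBeforeCreateSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch created = new CountDownLatch(1);
    // Connect string and znode path taken from the log (quorum=127.0.0.1:62457, baseZNode=/hbase).
    ZooKeeper zk = new ZooKeeper("127.0.0.1:62457", 30000, event -> {
      if (event.getType() == Watcher.Event.EventType.NodeCreated
          && "/hbase/running".equals(event.getPath())) {
        created.countDown();
      }
    });
    // exists() may register a watch even though the znode is absent; the watcher
    // fires once /hbase/running is created, which is exactly the
    // "Set watcher on znode that does not yet exist" behaviour logged above.
    zk.exists("/hbase/running", true);
    created.await();
    zk.close();
  }
}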
2024-11-07T12:29:25,034 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=87fb68174c52,41297,1730982564944 2024-11-07T12:29:25,039 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/hbase.id] with ID: a451c9b5-1323-42dc-90b2-2518473c83a9 2024-11-07T12:29:25,039 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/.tmp/hbase.id 2024-11-07T12:29:25,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41511 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:29:25,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36671 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:29:25,046 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/.tmp/hbase.id]:[hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/hbase.id] 2024-11-07T12:29:25,060 INFO [master/87fb68174c52:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:29:25,060 INFO [master/87fb68174c52:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-07T12:29:25,062 INFO [master/87fb68174c52:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
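FSUtils above writes the new cluster ID (a451c9b5-1323-42dc-90b2-2518473c83a9) to a temporary .tmp/hbase.id file and then moves it to its final hbase.id location, so a reader never observes a half-written ID file. The same write-then-rename idiom expressed with the Hadoop FileSystem API looks roughly like the sketch below; the shortened paths are illustrative, not the exact directories from this run.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:39665");   // NameNode address from the log
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");  // temporary location (illustrative)
    Path dst = new Path("/user/jenkins/test-data/hbase.id");       // final location (illustrative)

    // Write the ID completely to the temporary file first...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("a451c9b5-1323-42dc-90b2-2518473c83a9".getBytes(StandardCharsets.UTF_8));
    }
    // ...then publish it with a single rename, which HDFS performs atomically.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}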
2024-11-07T12:29:25,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:25,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1001a37dedd0001, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:25,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36671 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:29:25,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41511 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:29:25,074 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T12:29:25,075 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-07T12:29:25,076 INFO [master/87fb68174c52:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:29:25,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41511 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:29:25,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36671 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:29:25,085 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store 2024-11-07T12:29:25,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41511 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:29:25,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36671 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:29:25,094 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:29:25,094 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:29:25,094 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:29:25,094 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:29:25,094 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:29:25,094 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:29:25,094 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
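The descriptor logged for the local 'master:store' region defines four column families (info, proc, rs and state) with per-family versioning, block encoding, bloom filters and block sizes. The master store itself is assembled internally by MasterRegion, but an equivalent family definition for an ordinary table can be expressed with the public client API; the sketch below reproduces just the 'info' family settings from the log and uses a hypothetical 'demo:store' table name.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors the 'info' family from the log: VERSIONS => 3, IN_MEMORY => true,
    // DATA_BLOCK_ENCODING => ROW_INDEX_V1, BLOOMFILTER => ROWCOL, BLOCKSIZE => 8192.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8192)
            .build())
        .build();
  }
}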
2024-11-07T12:29:25,094 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730982565094Disabling compacts and flushes for region at 1730982565094Disabling writes for close at 1730982565094Writing region close event to WAL at 1730982565094Closed at 1730982565094 2024-11-07T12:29:25,095 WARN [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store/.initializing 2024-11-07T12:29:25,095 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/WALs/87fb68174c52,41297,1730982564944 2024-11-07T12:29:25,098 INFO [master/87fb68174c52:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87fb68174c52%2C41297%2C1730982564944, suffix=, logDir=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/WALs/87fb68174c52,41297,1730982564944, archiveDir=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/oldWALs, maxLogs=10 2024-11-07T12:29:25,099 INFO [master/87fb68174c52:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C41297%2C1730982564944.1730982565099 2024-11-07T12:29:25,104 INFO [master/87fb68174c52:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/WALs/87fb68174c52,41297,1730982564944/87fb68174c52%2C41297%2C1730982564944.1730982565099 2024-11-07T12:29:25,109 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46487:46487),(127.0.0.1/127.0.0.1:40095:40095)] 2024-11-07T12:29:25,111 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:29:25,111 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:29:25,111 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:29:25,111 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:29:25,112 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:29:25,114 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-07T12:29:25,114 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:25,114 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:29:25,115 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:29:25,116 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-07T12:29:25,116 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:25,117 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:29:25,117 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:29:25,118 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-07T12:29:25,118 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:25,118 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:29:25,119 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:29:25,120 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-07T12:29:25,120 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:25,120 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:29:25,120 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:29:25,121 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:29:25,121 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:29:25,123 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:29:25,123 DEBUG [master/87fb68174c52:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:29:25,123 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-07T12:29:25,125 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:29:25,129 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:29:25,130 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=778542, jitterRate=-0.010032907128334045}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-07T12:29:25,131 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1730982565111Initializing all the Stores at 1730982565112 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982565112Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982565112Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982565112Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982565112Cleaning up temporary data from old regions at 1730982565123 (+11 ms)Region opened successfully at 1730982565130 (+7 ms) 2024-11-07T12:29:25,131 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-07T12:29:25,134 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7466233f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87fb68174c52/172.17.0.3:0 2024-11-07T12:29:25,135 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-07T12:29:25,136 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-07T12:29:25,136 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-07T12:29:25,136 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-07T12:29:25,136 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-07T12:29:25,137 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-07T12:29:25,137 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-07T12:29:25,141 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-07T12:29:25,142 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-07T12:29:25,143 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-07T12:29:25,144 INFO [master/87fb68174c52:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-07T12:29:25,144 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-07T12:29:25,146 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-07T12:29:25,146 INFO [master/87fb68174c52:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-07T12:29:25,147 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-07T12:29:25,148 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-07T12:29:25,150 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-07T12:29:25,151 DEBUG 
[master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-07T12:29:25,153 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-07T12:29:25,155 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-07T12:29:25,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:29:25,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1001a37dedd0001, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:29:25,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:25,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1001a37dedd0001, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:25,157 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=87fb68174c52,41297,1730982564944, sessionid=0x1001a37dedd0000, setting cluster-up flag (Was=false) 2024-11-07T12:29:25,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:25,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1001a37dedd0001, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:25,166 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-07T12:29:25,167 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87fb68174c52,41297,1730982564944 2024-11-07T12:29:25,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1001a37dedd0001, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:25,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:25,176 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-07T12:29:25,178 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87fb68174c52,41297,1730982564944 2024-11-07T12:29:25,179 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-07T12:29:25,182 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-07T12:29:25,182 INFO [master/87fb68174c52:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-07T12:29:25,182 INFO [master/87fb68174c52:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-07T12:29:25,182 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 87fb68174c52,41297,1730982564944 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-07T12:29:25,184 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:29:25,184 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:29:25,184 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:29:25,184 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:29:25,184 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/87fb68174c52:0, corePoolSize=10, maxPoolSize=10 2024-11-07T12:29:25,184 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:25,185 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/87fb68174c52:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:29:25,185 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/87fb68174c52:0, corePoolSize=1, 
maxPoolSize=1 2024-11-07T12:29:25,187 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:29:25,187 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-07T12:29:25,189 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:25,189 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-07T12:29:25,193 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1730982595193 2024-11-07T12:29:25,194 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-07T12:29:25,194 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-07T12:29:25,194 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-07T12:29:25,194 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-07T12:29:25,194 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-07T12:29:25,194 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-07T12:29:25,196 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:25,200 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-07T12:29:25,200 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-07T12:29:25,200 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-07T12:29:25,201 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-07T12:29:25,201 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-07T12:29:25,203 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.large.0-1730982565201,5,FailOnTimeoutGroup] 2024-11-07T12:29:25,204 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.small.0-1730982565203,5,FailOnTimeoutGroup] 2024-11-07T12:29:25,204 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:25,204 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-07T12:29:25,204 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:25,204 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
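Annotation (not part of the log): the master entry just above reports the storeFileRefCount recovery feature as disabled because hbase.regions.recovery.store.file.ref.count defaults to 0. A minimal sketch of enabling it in a Configuration follows; the key name is copied from the log line, while the surrounding setup is illustrative only.

```java
// Illustrative sketch only: enabling the store-file-ref-count recovery chore that the
// log above reports as disabled. The key name is taken from the log line; everything
// else here is an assumption, not part of this test run.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class EnableRefCountRecovery {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // 0 (the default seen above) disables the feature; any value > 0 enables it.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
    System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
  }
}
```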
2024-11-07T12:29:25,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41511 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:29:25,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36671 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:29:25,208 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-07T12:29:25,208 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c 2024-11-07T12:29:25,218 INFO [RS:0;87fb68174c52:45061 {}] regionserver.HRegionServer(746): ClusterId : a451c9b5-1323-42dc-90b2-2518473c83a9 2024-11-07T12:29:25,218 DEBUG [RS:0;87fb68174c52:45061 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-07T12:29:25,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41511 is added to blk_1073741832_1008 (size=32) 2024-11-07T12:29:25,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36671 is added to blk_1073741832_1008 (size=32) 2024-11-07T12:29:25,221 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:29:25,221 DEBUG [RS:0;87fb68174c52:45061 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-07T12:29:25,222 DEBUG [RS:0;87fb68174c52:45061 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 
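Annotation (not part of the log): the hbase:meta descriptor printed above lists per-family attributes such as VERSIONS => '3', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOCKSIZE => '8192'. A hedged sketch of declaring an equivalent family with the public client API follows; the table name "demo" is made up, and this is not the internal FSTableDescriptors code path.

```java
// Sketch, not the internal FSTableDescriptors code path: a column family with the same
// attributes the log prints for hbase:meta's 'info' family, via the public client API.
// The table name "demo" is hypothetical.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptor {
  public static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                    // VERSIONS => '3'
        .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
        .setInMemory(true)                                    // IN_MEMORY => 'true'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
        .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
        .build();
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(info)
        .build();
  }
}
```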
2024-11-07T12:29:25,223 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:29:25,224 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:29:25,224 DEBUG [RS:0;87fb68174c52:45061 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-07T12:29:25,224 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:25,225 DEBUG [RS:0;87fb68174c52:45061 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cd86a42, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87fb68174c52/172.17.0.3:0 2024-11-07T12:29:25,225 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:29:25,225 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:29:25,226 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:29:25,227 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:25,227 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:29:25,227 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:29:25,229 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:29:25,229 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:25,229 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:29:25,230 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:29:25,231 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:29:25,231 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:25,232 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:29:25,232 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:29:25,233 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/hbase/meta/1588230740 2024-11-07T12:29:25,233 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/hbase/meta/1588230740 2024-11-07T12:29:25,235 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:29:25,235 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:29:25,236 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T12:29:25,238 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:29:25,240 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:29:25,241 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=808486, jitterRate=0.028043419122695923}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:29:25,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1730982565221Initializing all the Stores at 1730982565222 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982565222Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982565222Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982565222Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982565222Cleaning up temporary data from old regions at 1730982565235 (+13 ms)Region opened successfully at 1730982565241 (+6 ms) 2024-11-07T12:29:25,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:29:25,242 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:29:25,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on 
hbase:meta,,1.1588230740 2024-11-07T12:29:25,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:29:25,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:29:25,242 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:29:25,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730982565242Disabling compacts and flushes for region at 1730982565242Disabling writes for close at 1730982565242Writing region close event to WAL at 1730982565242Closed at 1730982565242 2024-11-07T12:29:25,244 DEBUG [RS:0;87fb68174c52:45061 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;87fb68174c52:45061 2024-11-07T12:29:25,244 INFO [RS:0;87fb68174c52:45061 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-07T12:29:25,244 INFO [RS:0;87fb68174c52:45061 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-07T12:29:25,244 DEBUG [RS:0;87fb68174c52:45061 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-07T12:29:25,244 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:29:25,244 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-07T12:29:25,244 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-07T12:29:25,245 INFO [RS:0;87fb68174c52:45061 {}] regionserver.HRegionServer(2659): reportForDuty to master=87fb68174c52,41297,1730982564944 with port=45061, startcode=1730982564994 2024-11-07T12:29:25,245 DEBUG [RS:0;87fb68174c52:45061 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-07T12:29:25,246 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:29:25,248 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-07T12:29:25,248 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:32995, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-07T12:29:25,248 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41297 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 87fb68174c52,45061,1730982564994 2024-11-07T12:29:25,249 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41297 {}] master.ServerManager(517): Registering regionserver=87fb68174c52,45061,1730982564994 2024-11-07T12:29:25,250 DEBUG [RS:0;87fb68174c52:45061 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.rootdir=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c 2024-11-07T12:29:25,250 DEBUG [RS:0;87fb68174c52:45061 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39665 2024-11-07T12:29:25,250 DEBUG [RS:0;87fb68174c52:45061 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-07T12:29:25,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:29:25,253 DEBUG [RS:0;87fb68174c52:45061 {}] zookeeper.ZKUtil(111): regionserver:45061-0x1001a37dedd0001, quorum=127.0.0.1:62457, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/87fb68174c52,45061,1730982564994 2024-11-07T12:29:25,253 WARN [RS:0;87fb68174c52:45061 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-07T12:29:25,253 INFO [RS:0;87fb68174c52:45061 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:29:25,253 DEBUG [RS:0;87fb68174c52:45061 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994 2024-11-07T12:29:25,257 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [87fb68174c52,45061,1730982564994] 2024-11-07T12:29:25,260 INFO [RS:0;87fb68174c52:45061 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-07T12:29:25,263 INFO [RS:0;87fb68174c52:45061 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-07T12:29:25,264 INFO [RS:0;87fb68174c52:45061 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-07T12:29:25,264 INFO [RS:0;87fb68174c52:45061 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:25,264 INFO [RS:0;87fb68174c52:45061 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-07T12:29:25,265 INFO [RS:0;87fb68174c52:45061 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-07T12:29:25,265 INFO [RS:0;87fb68174c52:45061 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
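Annotation (not part of the log): the MemStoreFlusher entry above reports globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. roughly 95% of the limit. A hedged sketch of the two heap-fraction settings that commonly produce such numbers follows; the key names are assumptions based on standard HBase configuration, since the log prints only the resolved absolute values.

```java
// Sketch under the assumption that the standard heap-fraction keys are in play here;
// the log prints only the derived limits (880 M / 836 M), not these keys.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimits {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the region server heap usable by all memstores (upper limit).
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Low-water mark as a fraction of the upper limit (0.95 * 880 M is about 836 M).
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
  }
}
```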
2024-11-07T12:29:25,266 DEBUG [RS:0;87fb68174c52:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:25,266 DEBUG [RS:0;87fb68174c52:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:25,266 DEBUG [RS:0;87fb68174c52:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:25,266 DEBUG [RS:0;87fb68174c52:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:25,266 DEBUG [RS:0;87fb68174c52:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:25,266 DEBUG [RS:0;87fb68174c52:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/87fb68174c52:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:29:25,266 DEBUG [RS:0;87fb68174c52:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:25,266 DEBUG [RS:0;87fb68174c52:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:25,266 DEBUG [RS:0;87fb68174c52:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:25,266 DEBUG [RS:0;87fb68174c52:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:25,266 DEBUG [RS:0;87fb68174c52:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:25,266 DEBUG [RS:0;87fb68174c52:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:25,266 DEBUG [RS:0;87fb68174c52:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/87fb68174c52:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:29:25,266 DEBUG [RS:0;87fb68174c52:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:29:25,267 INFO [RS:0;87fb68174c52:45061 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:25,267 INFO [RS:0;87fb68174c52:45061 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:25,267 INFO [RS:0;87fb68174c52:45061 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:25,267 INFO [RS:0;87fb68174c52:45061 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-07T12:29:25,267 INFO [RS:0;87fb68174c52:45061 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:25,267 INFO [RS:0;87fb68174c52:45061 {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,45061,1730982564994-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:29:25,284 INFO [RS:0;87fb68174c52:45061 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-07T12:29:25,284 INFO [RS:0;87fb68174c52:45061 {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,45061,1730982564994-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:25,284 INFO [RS:0;87fb68174c52:45061 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:25,284 INFO [RS:0;87fb68174c52:45061 {}] regionserver.Replication(171): 87fb68174c52,45061,1730982564994 started 2024-11-07T12:29:25,300 INFO [RS:0;87fb68174c52:45061 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:25,300 INFO [RS:0;87fb68174c52:45061 {}] regionserver.HRegionServer(1482): Serving as 87fb68174c52,45061,1730982564994, RpcServer on 87fb68174c52/172.17.0.3:45061, sessionid=0x1001a37dedd0001 2024-11-07T12:29:25,300 DEBUG [RS:0;87fb68174c52:45061 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-07T12:29:25,300 DEBUG [RS:0;87fb68174c52:45061 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 87fb68174c52,45061,1730982564994 2024-11-07T12:29:25,300 DEBUG [RS:0;87fb68174c52:45061 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87fb68174c52,45061,1730982564994' 2024-11-07T12:29:25,300 DEBUG [RS:0;87fb68174c52:45061 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-07T12:29:25,301 DEBUG [RS:0;87fb68174c52:45061 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-07T12:29:25,301 DEBUG [RS:0;87fb68174c52:45061 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-07T12:29:25,302 DEBUG [RS:0;87fb68174c52:45061 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-07T12:29:25,302 DEBUG [RS:0;87fb68174c52:45061 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 87fb68174c52,45061,1730982564994 2024-11-07T12:29:25,302 DEBUG [RS:0;87fb68174c52:45061 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87fb68174c52,45061,1730982564994' 2024-11-07T12:29:25,302 DEBUG [RS:0;87fb68174c52:45061 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-07T12:29:25,302 DEBUG [RS:0;87fb68174c52:45061 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-07T12:29:25,303 DEBUG [RS:0;87fb68174c52:45061 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-07T12:29:25,303 INFO [RS:0;87fb68174c52:45061 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-07T12:29:25,303 INFO [RS:0;87fb68174c52:45061 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-07T12:29:25,398 WARN [87fb68174c52:41297 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-07T12:29:25,405 INFO [RS:0;87fb68174c52:45061 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87fb68174c52%2C45061%2C1730982564994, suffix=, logDir=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994, archiveDir=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/oldWALs, maxLogs=32 2024-11-07T12:29:25,406 INFO [RS:0;87fb68174c52:45061 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C45061%2C1730982564994.1730982565406 2024-11-07T12:29:25,413 INFO [RS:0;87fb68174c52:45061 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 2024-11-07T12:29:25,416 DEBUG [RS:0;87fb68174c52:45061 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40095:40095),(127.0.0.1/127.0.0.1:46487:46487)] 2024-11-07T12:29:25,648 DEBUG [87fb68174c52:41297 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-07T12:29:25,649 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=87fb68174c52,45061,1730982564994 2024-11-07T12:29:25,650 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87fb68174c52,45061,1730982564994, state=OPENING 2024-11-07T12:29:25,652 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-07T12:29:25,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:25,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1001a37dedd0001, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:29:25,654 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:29:25,654 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:29:25,654 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:29:25,655 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=87fb68174c52,45061,1730982564994}] 2024-11-07T12:29:25,807 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-07T12:29:25,809 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46801, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-07T12:29:25,813 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-07T12:29:25,814 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:29:25,816 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87fb68174c52%2C45061%2C1730982564994.meta, suffix=.meta, logDir=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994, archiveDir=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/oldWALs, maxLogs=32 2024-11-07T12:29:25,817 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta 2024-11-07T12:29:25,823 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta 2024-11-07T12:29:25,825 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46487:46487),(127.0.0.1/127.0.0.1:40095:40095)] 2024-11-07T12:29:25,830 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:29:25,830 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-07T12:29:25,830 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-07T12:29:25,830 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
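Annotation (not part of the log): the WAL configuration entries above show blocksize=256 MB, rollsize=128 MB (half the block size) and maxLogs=32 for the FSHLog provider. A hedged sketch of the settings that typically yield those values follows; the key names are assumptions drawn from standard HBase configuration, as the log reports only the computed numbers.

```java
// Sketch: settings that commonly correspond to the "blocksize=256 MB, rollsize=128 MB,
// maxLogs=32" WAL configuration logged above. Key names are assumptions; the log itself
// shows only the resulting values.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollSettings {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // WAL block size
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // roll at 50% => 128 MB
    conf.setInt("hbase.regionserver.maxlogs", 32);                         // cap on un-archived WALs
  }
}
```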
2024-11-07T12:29:25,830 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-07T12:29:25,830 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:29:25,831 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-07T12:29:25,831 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-07T12:29:25,832 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:29:25,833 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:29:25,834 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:25,834 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:29:25,834 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:29:25,835 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:29:25,835 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:25,836 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:29:25,836 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:29:25,836 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:29:25,837 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:25,837 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:29:25,837 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:29:25,838 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:29:25,838 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:25,838 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
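Annotation (not part of the log): each store opener above prints its effective CompactionConfiguration (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, throttle point 2684354560). A hedged sketch of the knobs that usually resolve into those values follows; the key names are assumptions from standard HBase configuration, not printed by this log.

```java
// Sketch of the knobs that typically resolve into the CompactionConfiguration lines above
// (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, off-peak ratio 5.000000).
// Key names are assumptions drawn from standard HBase configuration, not from this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                // minimum files per minor compaction
    conf.setInt("hbase.hstore.compaction.max", 10);               // maximum files per minor compaction
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // file-selection ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // looser ratio during off-peak hours
  }
}
```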
2024-11-07T12:29:25,838 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:29:25,839 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/hbase/meta/1588230740 2024-11-07T12:29:25,840 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/hbase/meta/1588230740 2024-11-07T12:29:25,842 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:29:25,842 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:29:25,842 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T12:29:25,844 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:29:25,845 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=777878, jitterRate=-0.010877415537834167}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:29:25,845 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-07T12:29:25,846 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1730982565831Writing region info on filesystem at 1730982565831Initializing all the Stores at 1730982565832 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982565832Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982565832Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982565832Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982565832Cleaning up temporary data from old regions at 1730982565842 (+10 ms)Running coprocessor post-open hooks at 1730982565845 (+3 ms)Region opened successfully at 1730982565846 (+1 ms) 2024-11-07T12:29:25,847 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1730982565807 2024-11-07T12:29:25,850 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-07T12:29:25,850 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-07T12:29:25,851 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=87fb68174c52,45061,1730982564994 2024-11-07T12:29:25,852 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87fb68174c52,45061,1730982564994, state=OPEN 2024-11-07T12:29:25,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:29:25,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1001a37dedd0001, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:29:25,856 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=87fb68174c52,45061,1730982564994 2024-11-07T12:29:25,856 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:29:25,856 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:29:25,859 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-07T12:29:25,859 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=87fb68174c52,45061,1730982564994 in 202 msec 2024-11-07T12:29:25,863 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-07T12:29:25,863 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 616 msec 2024-11-07T12:29:25,864 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:29:25,864 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-07T12:29:25,865 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:29:25,865 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87fb68174c52,45061,1730982564994, seqNum=-1] 2024-11-07T12:29:25,866 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:29:25,867 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33227, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:29:25,874 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 692 msec 2024-11-07T12:29:25,874 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1730982565874, completionTime=-1 2024-11-07T12:29:25,874 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-07T12:29:25,874 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-07T12:29:25,876 INFO [master/87fb68174c52:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-07T12:29:25,876 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1730982625876 2024-11-07T12:29:25,876 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1730982685876 2024-11-07T12:29:25,876 INFO [master/87fb68174c52:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-07T12:29:25,877 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,41297,1730982564944-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:25,877 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,41297,1730982564944-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:25,877 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,41297,1730982564944-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:25,877 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-87fb68174c52:41297, period=300000, unit=MILLISECONDS is enabled. 
2024-11-07T12:29:25,877 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:25,877 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:25,879 DEBUG [master/87fb68174c52:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-07T12:29:25,881 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.847sec 2024-11-07T12:29:25,881 INFO [master/87fb68174c52:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-07T12:29:25,881 INFO [master/87fb68174c52:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-07T12:29:25,881 INFO [master/87fb68174c52:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-07T12:29:25,881 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-07T12:29:25,881 INFO [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-07T12:29:25,881 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,41297,1730982564944-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:29:25,881 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,41297,1730982564944-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-07T12:29:25,884 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-07T12:29:25,884 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-07T12:29:25,884 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,41297,1730982564944-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-07T12:29:25,918 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e8113bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:29:25,918 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 87fb68174c52,41297,-1 for getting cluster id 2024-11-07T12:29:25,918 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-07T12:29:25,920 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a451c9b5-1323-42dc-90b2-2518473c83a9' 2024-11-07T12:29:25,921 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-07T12:29:25,921 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a451c9b5-1323-42dc-90b2-2518473c83a9" 2024-11-07T12:29:25,921 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55b69c49, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:29:25,921 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [87fb68174c52,41297,-1] 2024-11-07T12:29:25,921 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-07T12:29:25,921 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:29:25,923 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43404, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-07T12:29:25,924 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10c7ae56, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:29:25,925 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:29:25,926 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87fb68174c52,45061,1730982564994, seqNum=-1] 2024-11-07T12:29:25,926 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:29:25,928 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53220, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:29:25,930 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=87fb68174c52,41297,1730982564944 2024-11-07T12:29:25,930 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:29:25,934 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-07T12:29:25,959 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/87fb68174c52:0 server-side Connection retries=45 2024-11-07T12:29:25,959 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:29:25,959 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:29:25,959 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:29:25,960 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:29:25,960 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:29:25,960 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T12:29:25,960 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:29:25,961 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42371 2024-11-07T12:29:25,963 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42371 connecting to ZooKeeper ensemble=127.0.0.1:62457 2024-11-07T12:29:25,963 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:29:25,966 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:29:25,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:423710x0, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:29:25,973 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42371-0x1001a37dedd0002 connected 2024-11-07T12:29:25,973 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:42371-0x1001a37dedd0002, quorum=127.0.0.1:62457, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-07T12:29:25,973 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-07T12:29:25,974 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-07T12:29:25,974 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
2024-11-07T12:29:25,975 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:42371-0x1001a37dedd0002, quorum=127.0.0.1:62457, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T12:29:25,977 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42371-0x1001a37dedd0002, quorum=127.0.0.1:62457, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:29:25,977 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42371 2024-11-07T12:29:25,977 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42371 2024-11-07T12:29:25,978 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42371 2024-11-07T12:29:25,978 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42371 2024-11-07T12:29:25,978 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42371 2024-11-07T12:29:25,982 INFO [RS:1;87fb68174c52:42371 {}] regionserver.HRegionServer(746): ClusterId : a451c9b5-1323-42dc-90b2-2518473c83a9 2024-11-07T12:29:25,982 DEBUG [RS:1;87fb68174c52:42371 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-07T12:29:25,984 DEBUG [RS:1;87fb68174c52:42371 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-07T12:29:25,984 DEBUG [RS:1;87fb68174c52:42371 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-07T12:29:25,986 DEBUG [RS:1;87fb68174c52:42371 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-07T12:29:25,987 DEBUG [RS:1;87fb68174c52:42371 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ddf767a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87fb68174c52/172.17.0.3:0 2024-11-07T12:29:26,027 DEBUG [RS:1;87fb68174c52:42371 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;87fb68174c52:42371 2024-11-07T12:29:26,027 INFO [RS:1;87fb68174c52:42371 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-07T12:29:26,027 INFO [RS:1;87fb68174c52:42371 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-07T12:29:26,027 DEBUG [RS:1;87fb68174c52:42371 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-07T12:29:26,029 INFO [RS:1;87fb68174c52:42371 {}] regionserver.HRegionServer(2659): reportForDuty to master=87fb68174c52,41297,1730982564944 with port=42371, startcode=1730982565959 2024-11-07T12:29:26,029 DEBUG [RS:1;87fb68174c52:42371 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-07T12:29:26,031 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57337, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-07T12:29:26,031 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41297 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 87fb68174c52,42371,1730982565959 2024-11-07T12:29:26,031 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41297 {}] master.ServerManager(517): Registering regionserver=87fb68174c52,42371,1730982565959 2024-11-07T12:29:26,033 DEBUG [RS:1;87fb68174c52:42371 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c 2024-11-07T12:29:26,033 DEBUG [RS:1;87fb68174c52:42371 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39665 2024-11-07T12:29:26,033 DEBUG [RS:1;87fb68174c52:42371 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-07T12:29:26,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:29:26,036 DEBUG [RS:1;87fb68174c52:42371 {}] zookeeper.ZKUtil(111): regionserver:42371-0x1001a37dedd0002, quorum=127.0.0.1:62457, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/87fb68174c52,42371,1730982565959 2024-11-07T12:29:26,036 WARN [RS:1;87fb68174c52:42371 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-07T12:29:26,036 INFO [RS:1;87fb68174c52:42371 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:29:26,036 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [87fb68174c52,42371,1730982565959] 2024-11-07T12:29:26,036 DEBUG [RS:1;87fb68174c52:42371 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959 2024-11-07T12:29:26,040 INFO [RS:1;87fb68174c52:42371 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-07T12:29:26,042 INFO [RS:1;87fb68174c52:42371 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-07T12:29:26,042 INFO [RS:1;87fb68174c52:42371 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-07T12:29:26,042 INFO [RS:1;87fb68174c52:42371 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-07T12:29:26,044 INFO [RS:1;87fb68174c52:42371 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-07T12:29:26,045 INFO [RS:1;87fb68174c52:42371 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-07T12:29:26,045 INFO [RS:1;87fb68174c52:42371 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:26,045 DEBUG [RS:1;87fb68174c52:42371 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:26,045 DEBUG [RS:1;87fb68174c52:42371 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:26,045 DEBUG [RS:1;87fb68174c52:42371 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:26,045 DEBUG [RS:1;87fb68174c52:42371 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:26,046 DEBUG [RS:1;87fb68174c52:42371 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:26,046 DEBUG [RS:1;87fb68174c52:42371 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/87fb68174c52:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:29:26,046 DEBUG [RS:1;87fb68174c52:42371 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:26,046 DEBUG [RS:1;87fb68174c52:42371 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:26,046 DEBUG [RS:1;87fb68174c52:42371 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:26,046 DEBUG [RS:1;87fb68174c52:42371 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:26,046 DEBUG [RS:1;87fb68174c52:42371 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:26,046 DEBUG [RS:1;87fb68174c52:42371 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:29:26,046 DEBUG [RS:1;87fb68174c52:42371 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/87fb68174c52:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:29:26,046 DEBUG [RS:1;87fb68174c52:42371 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:29:26,047 INFO [RS:1;87fb68174c52:42371 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-07T12:29:26,047 INFO [RS:1;87fb68174c52:42371 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:26,047 INFO [RS:1;87fb68174c52:42371 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:26,047 INFO [RS:1;87fb68174c52:42371 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:26,047 INFO [RS:1;87fb68174c52:42371 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:26,047 INFO [RS:1;87fb68174c52:42371 {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,42371,1730982565959-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:29:26,061 INFO [RS:1;87fb68174c52:42371 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-07T12:29:26,062 INFO [RS:1;87fb68174c52:42371 {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,42371,1730982565959-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:26,062 INFO [RS:1;87fb68174c52:42371 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:26,062 INFO [RS:1;87fb68174c52:42371 {}] regionserver.Replication(171): 87fb68174c52,42371,1730982565959 started 2024-11-07T12:29:26,075 INFO [RS:1;87fb68174c52:42371 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:29:26,075 INFO [RS:1;87fb68174c52:42371 {}] regionserver.HRegionServer(1482): Serving as 87fb68174c52,42371,1730982565959, RpcServer on 87fb68174c52/172.17.0.3:42371, sessionid=0x1001a37dedd0002 2024-11-07T12:29:26,075 DEBUG [RS:1;87fb68174c52:42371 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-07T12:29:26,075 DEBUG [RS:1;87fb68174c52:42371 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 87fb68174c52,42371,1730982565959 2024-11-07T12:29:26,075 DEBUG [RS:1;87fb68174c52:42371 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87fb68174c52,42371,1730982565959' 2024-11-07T12:29:26,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;87fb68174c52:42371,5,FailOnTimeoutGroup] 2024-11-07T12:29:26,075 DEBUG [RS:1;87fb68174c52:42371 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-07T12:29:26,076 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-07T12:29:26,076 DEBUG [RS:1;87fb68174c52:42371 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-07T12:29:26,076 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-07T12:29:26,077 DEBUG [RS:1;87fb68174c52:42371 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-07T12:29:26,077 DEBUG [RS:1;87fb68174c52:42371 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-07T12:29:26,077 DEBUG [RS:1;87fb68174c52:42371 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
87fb68174c52,42371,1730982565959 2024-11-07T12:29:26,077 DEBUG [RS:1;87fb68174c52:42371 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87fb68174c52,42371,1730982565959' 2024-11-07T12:29:26,077 DEBUG [RS:1;87fb68174c52:42371 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-07T12:29:26,077 DEBUG [RS:1;87fb68174c52:42371 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-07T12:29:26,077 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 87fb68174c52,41297,1730982564944 2024-11-07T12:29:26,077 DEBUG [RS:1;87fb68174c52:42371 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-07T12:29:26,077 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@778637ca 2024-11-07T12:29:26,077 INFO [RS:1;87fb68174c52:42371 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-07T12:29:26,078 INFO [RS:1;87fb68174c52:42371 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-07T12:29:26,078 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T12:29:26,079 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43406, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T12:29:26,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41297 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-07T12:29:26,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41297 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
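The two TableDescriptorChecker warnings above belong to the table created in the next entry: it uses a deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) so the test can force frequent flushes and log rolls. A minimal sketch of building such a descriptor with the public HBase client API follows; the values are taken from the warnings, but the code itself is an assumed illustration, not the test's actual source.

    // Hedged sketch (not the test's actual code): a table descriptor with the small
    // MAX_FILESIZE and MEMSTORE_FLUSHSIZE values reported by TableDescriptorChecker above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateSmallFlushTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
              .setMaxFileSize(786432L)      // triggers the MAX_FILESIZE warning above
              .setMemStoreFlushSize(8192L)  // triggers the MEMSTORE_FLUSHSIZE warning above
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
              .build();
          admin.createTable(td);            // appears as the HMaster "create" entry that follows
        }
      }
    }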
2024-11-07T12:29:26,080 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41297 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T12:29:26,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41297 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-07T12:29:26,083 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T12:29:26,083 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:26,084 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41297 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-07T12:29:26,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41297 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-07T12:29:26,085 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T12:29:26,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41511 is added to blk_1073741835_1011 (size=393) 2024-11-07T12:29:26,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36671 is added to blk_1073741835_1011 (size=393) 2024-11-07T12:29:26,095 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a522e52eecdb10e9bf4c387c36fbbb6c, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c 2024-11-07T12:29:26,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36671 is added to blk_1073741836_1012 (size=76) 2024-11-07T12:29:26,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41511 is added to blk_1073741836_1012 (size=76) 2024-11-07T12:29:26,102 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:29:26,102 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing a522e52eecdb10e9bf4c387c36fbbb6c, disabling compactions & flushes 2024-11-07T12:29:26,102 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c. 2024-11-07T12:29:26,102 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c. 2024-11-07T12:29:26,102 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c. after waiting 0 ms 2024-11-07T12:29:26,102 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c. 2024-11-07T12:29:26,102 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c. 2024-11-07T12:29:26,102 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for a522e52eecdb10e9bf4c387c36fbbb6c: Waiting for close lock at 1730982566102Disabling compacts and flushes for region at 1730982566102Disabling writes for close at 1730982566102Writing region close event to WAL at 1730982566102Closed at 1730982566102 2024-11-07T12:29:26,104 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T12:29:26,104 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1730982566104"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730982566104"}]},"ts":"1730982566104"} 2024-11-07T12:29:26,107 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-07T12:29:26,108 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T12:29:26,108 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730982566108"}]},"ts":"1730982566108"} 2024-11-07T12:29:26,110 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-07T12:29:26,111 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a522e52eecdb10e9bf4c387c36fbbb6c, ASSIGN}] 2024-11-07T12:29:26,112 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a522e52eecdb10e9bf4c387c36fbbb6c, ASSIGN 2024-11-07T12:29:26,113 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a522e52eecdb10e9bf4c387c36fbbb6c, ASSIGN; state=OFFLINE, location=87fb68174c52,45061,1730982564994; forceNewPlan=false, retain=false 2024-11-07T12:29:26,179 INFO [RS:1;87fb68174c52:42371 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87fb68174c52%2C42371%2C1730982565959, suffix=, logDir=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959, archiveDir=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/oldWALs, maxLogs=32 2024-11-07T12:29:26,180 INFO [RS:1;87fb68174c52:42371 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C42371%2C1730982565959.1730982566180 2024-11-07T12:29:26,186 INFO [RS:1;87fb68174c52:42371 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 2024-11-07T12:29:26,187 DEBUG [RS:1;87fb68174c52:42371 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46487:46487),(127.0.0.1/127.0.0.1:40095:40095)] 2024-11-07T12:29:26,264 INFO [87fb68174c52:41297 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
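The AbstractFSWAL(613) entry above reports the effective WAL rolling parameters for the new region server (blocksize=256 MB, rollsize=128 MB, maxLogs=32); the roll size is the block size scaled by the log-roll multiplier. A hedged sketch of the standard configuration keys behind those numbers; the property names are the usual HBase keys, while the values are only illustrative assumptions chosen to match the entry above.

    // Hedged sketch: configuration keys behind the "WAL configuration" entry above.
    // rollsize = blocksize * hbase.regionserver.logroll.multiplier; values are illustrative.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // WAL block size
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // roll at 50% of block size
        conf.setInt("hbase.regionserver.maxlogs", 32);                         // matches maxLogs=32 above
        System.out.println("maxlogs=" + conf.get("hbase.regionserver.maxlogs"));
      }
    }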
2024-11-07T12:29:26,265 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a522e52eecdb10e9bf4c387c36fbbb6c, regionState=OPENING, regionLocation=87fb68174c52,45061,1730982564994 2024-11-07T12:29:26,267 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a522e52eecdb10e9bf4c387c36fbbb6c, ASSIGN because future has completed 2024-11-07T12:29:26,268 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a522e52eecdb10e9bf4c387c36fbbb6c, server=87fb68174c52,45061,1730982564994}] 2024-11-07T12:29:26,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:29:26,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:29:26,426 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c. 2024-11-07T12:29:26,427 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => a522e52eecdb10e9bf4c387c36fbbb6c, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:29:26,427 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath a522e52eecdb10e9bf4c387c36fbbb6c 2024-11-07T12:29:26,427 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:29:26,427 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for a522e52eecdb10e9bf4c387c36fbbb6c 2024-11-07T12:29:26,428 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for a522e52eecdb10e9bf4c387c36fbbb6c 2024-11-07T12:29:26,429 INFO [StoreOpener-a522e52eecdb10e9bf4c387c36fbbb6c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a522e52eecdb10e9bf4c387c36fbbb6c 2024-11-07T12:29:26,430 INFO [StoreOpener-a522e52eecdb10e9bf4c387c36fbbb6c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a522e52eecdb10e9bf4c387c36fbbb6c columnFamilyName info 2024-11-07T12:29:26,431 DEBUG [StoreOpener-a522e52eecdb10e9bf4c387c36fbbb6c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:29:26,431 INFO [StoreOpener-a522e52eecdb10e9bf4c387c36fbbb6c-1 {}] regionserver.HStore(327): Store=a522e52eecdb10e9bf4c387c36fbbb6c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:29:26,431 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for a522e52eecdb10e9bf4c387c36fbbb6c 2024-11-07T12:29:26,432 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c 2024-11-07T12:29:26,432 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c 2024-11-07T12:29:26,433 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for a522e52eecdb10e9bf4c387c36fbbb6c 2024-11-07T12:29:26,433 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for a522e52eecdb10e9bf4c387c36fbbb6c 2024-11-07T12:29:26,434 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for a522e52eecdb10e9bf4c387c36fbbb6c 2024-11-07T12:29:26,436 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:29:26,437 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened a522e52eecdb10e9bf4c387c36fbbb6c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=764605, jitterRate=-0.027755707502365112}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-07T12:29:26,437 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a522e52eecdb10e9bf4c387c36fbbb6c 2024-11-07T12:29:26,437 DEBUG 
[RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for a522e52eecdb10e9bf4c387c36fbbb6c: Running coprocessor pre-open hook at 1730982566428Writing region info on filesystem at 1730982566428Initializing all the Stores at 1730982566429 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982566429Cleaning up temporary data from old regions at 1730982566433 (+4 ms)Running coprocessor post-open hooks at 1730982566437 (+4 ms)Region opened successfully at 1730982566437 2024-11-07T12:29:26,438 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c., pid=6, masterSystemTime=1730982566421 2024-11-07T12:29:26,441 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c. 2024-11-07T12:29:26,441 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c. 2024-11-07T12:29:26,442 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a522e52eecdb10e9bf4c387c36fbbb6c, regionState=OPEN, openSeqNum=2, regionLocation=87fb68174c52,45061,1730982564994 2024-11-07T12:29:26,444 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a522e52eecdb10e9bf4c387c36fbbb6c, server=87fb68174c52,45061,1730982564994 because future has completed 2024-11-07T12:29:26,448 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-07T12:29:26,449 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure a522e52eecdb10e9bf4c387c36fbbb6c, server=87fb68174c52,45061,1730982564994 in 178 msec 2024-11-07T12:29:26,451 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-07T12:29:26,451 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a522e52eecdb10e9bf4c387c36fbbb6c, ASSIGN in 337 msec 2024-11-07T12:29:26,452 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T12:29:26,453 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730982566452"}]},"ts":"1730982566452"} 2024-11-07T12:29:26,455 
INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-07T12:29:26,456 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T12:29:26,458 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 376 msec 2024-11-07T12:29:26,926 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-07T12:29:26,931 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:29:26,947 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:29:26,947 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:29:26,948 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:29:26,956 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-07T12:29:26,956 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-07T12:29:26,956 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-07T12:29:31,260 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-07T12:29:31,261 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-07T12:29:31,929 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-07T12:29:31,931 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:29:31,947 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:29:31,951 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:29:31,951 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:29:36,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41297 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-07T12:29:36,188 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-07T12:29:36,188 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-07T12:29:36,191 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-07T12:29:36,191 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c. 2024-11-07T12:29:36,204 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:29:36,207 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:29:36,207 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:29:36,207 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:29:36,208 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:29:36,208 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d0f4a9a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:29:36,208 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ff5703b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:29:36,321 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2047cbbb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/java.io.tmpdir/jetty-localhost-37533-hadoop-hdfs-3_4_1-tests_jar-_-any-12684854234244055777/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:29:36,322 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2021586{HTTP/1.1, (http/1.1)}{localhost:37533} 2024-11-07T12:29:36,322 INFO [Time-limited test {}] server.Server(415): Started @114994ms 2024-11-07T12:29:36,323 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
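The client-side entries at the start of this block (the CREATE operation completing, the META scan, and the single region found for TestLogRolling-testLogRollOnDatanodeDeath) reflect the test verifying its newly created table. A minimal client sketch of an equivalent check, assumed for illustration rather than taken from HBaseTestingUtil:

    // Hedged sketch of the verification reflected in the entries above: wait for the CREATE
    // to complete, then list the table's region locations from hbase:meta.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class VerifyTableRegions {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin();
             RegionLocator locator = conn.getRegionLocator(tn)) {
          while (!admin.isTableAvailable(tn)) {  // poll until the CREATE procedure finishes
            Thread.sleep(100);
          }
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // prints e.g. "TestLogRolling-testLogRollOnDatanodeDeath,,<ts>.<encoded>." as in the log
            System.out.println(loc.getRegion().getRegionNameAsString());
          }
        }
      }
    }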
2024-11-07T12:29:36,354 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:29:36,358 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:29:36,361 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:29:36,361 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:29:36,361 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:29:36,362 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@519de6b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:29:36,362 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@167a7fde{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:29:36,402 WARN [Thread-830 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data5/current/BP-1009609649-172.17.0.3-1730982564245/current, will proceed with Du for space computation calculation, 2024-11-07T12:29:36,403 WARN [Thread-831 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data6/current/BP-1009609649-172.17.0.3-1730982564245/current, will proceed with Du for space computation calculation, 2024-11-07T12:29:36,419 WARN [Thread-810 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:29:36,421 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x761f0a1bada3851e with lease ID 0x9849c9739aa860be: Processing first storage report for DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd from datanode DatanodeRegistration(127.0.0.1:44341, datanodeUuid=a3aed02e-58a3-486f-9bf5-945900a9de8a, infoPort=44107, infoSecurePort=0, ipcPort=35589, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245) 2024-11-07T12:29:36,421 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x761f0a1bada3851e with lease ID 0x9849c9739aa860be: from storage DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd node DatanodeRegistration(127.0.0.1:44341, datanodeUuid=a3aed02e-58a3-486f-9bf5-945900a9de8a, infoPort=44107, infoSecurePort=0, ipcPort=35589, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:29:36,421 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x761f0a1bada3851e with lease ID 0x9849c9739aa860be: Processing first storage report for DS-e584ad66-fd8d-4b25-bed2-59d1832457c0 from datanode DatanodeRegistration(127.0.0.1:44341, datanodeUuid=a3aed02e-58a3-486f-9bf5-945900a9de8a, infoPort=44107, infoSecurePort=0, ipcPort=35589, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245) 2024-11-07T12:29:36,421 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x761f0a1bada3851e with lease ID 0x9849c9739aa860be: from storage DS-e584ad66-fd8d-4b25-bed2-59d1832457c0 node DatanodeRegistration(127.0.0.1:44341, datanodeUuid=a3aed02e-58a3-486f-9bf5-945900a9de8a, infoPort=44107, infoSecurePort=0, ipcPort=35589, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:29:36,477 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5b5be5aa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/java.io.tmpdir/jetty-localhost-39409-hadoop-hdfs-3_4_1-tests_jar-_-any-17325323327058657415/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:29:36,477 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7f7383da{HTTP/1.1, (http/1.1)}{localhost:39409} 2024-11-07T12:29:36,477 INFO [Time-limited test {}] server.Server(415): Started @115150ms 2024-11-07T12:29:36,478 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:29:36,514 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:29:36,516 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:29:36,517 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:29:36,517 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:29:36,517 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:29:36,518 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50ff2063{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:29:36,518 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1dc59954{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:29:36,557 WARN [Thread-865 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data7/current/BP-1009609649-172.17.0.3-1730982564245/current, will proceed with Du for space computation calculation, 2024-11-07T12:29:36,557 WARN [Thread-866 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data8/current/BP-1009609649-172.17.0.3-1730982564245/current, will proceed with Du for space computation calculation, 2024-11-07T12:29:36,580 WARN [Thread-845 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:29:36,583 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1ae31a922f433a73 with lease ID 0x9849c9739aa860bf: Processing first storage report for DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962 from datanode DatanodeRegistration(127.0.0.1:37967, datanodeUuid=ec31049c-c9de-4285-9e6d-5a8b30ce0dd0, infoPort=44117, infoSecurePort=0, ipcPort=35033, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245) 2024-11-07T12:29:36,583 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1ae31a922f433a73 with lease ID 0x9849c9739aa860bf: from storage DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962 node DatanodeRegistration(127.0.0.1:37967, datanodeUuid=ec31049c-c9de-4285-9e6d-5a8b30ce0dd0, infoPort=44117, infoSecurePort=0, ipcPort=35033, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-07T12:29:36,583 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1ae31a922f433a73 with lease ID 0x9849c9739aa860bf: Processing first storage report for DS-43aab324-5026-4efd-90fe-90cb02b824fd from datanode DatanodeRegistration(127.0.0.1:37967, datanodeUuid=ec31049c-c9de-4285-9e6d-5a8b30ce0dd0, infoPort=44117, infoSecurePort=0, ipcPort=35033, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245) 2024-11-07T12:29:36,583 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1ae31a922f433a73 with lease ID 0x9849c9739aa860bf: from storage DS-43aab324-5026-4efd-90fe-90cb02b824fd node DatanodeRegistration(127.0.0.1:37967, datanodeUuid=ec31049c-c9de-4285-9e6d-5a8b30ce0dd0, infoPort=44117, infoSecurePort=0, ipcPort=35033, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:29:36,639 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30add41a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/java.io.tmpdir/jetty-localhost-46305-hadoop-hdfs-3_4_1-tests_jar-_-any-14300026556512354929/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:29:36,639 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3fd17220{HTTP/1.1, (http/1.1)}{localhost:46305} 2024-11-07T12:29:36,639 INFO [Time-limited test {}] server.Server(415): Started @115312ms 2024-11-07T12:29:36,640 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-07T12:29:36,721 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data9/current/BP-1009609649-172.17.0.3-1730982564245/current, will proceed with Du for space computation calculation, 2024-11-07T12:29:36,721 WARN [Thread-892 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data10/current/BP-1009609649-172.17.0.3-1730982564245/current, will proceed with Du for space computation calculation, 2024-11-07T12:29:36,737 WARN [Thread-880 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-07T12:29:36,739 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4b5cebac7d7d3df0 with lease ID 0x9849c9739aa860c0: Processing first storage report for DS-b201ff21-27e5-4c46-a980-e9a151a4b43d from datanode DatanodeRegistration(127.0.0.1:35157, datanodeUuid=9af17079-0b3c-4990-8475-04b6d91c1138, infoPort=36961, infoSecurePort=0, ipcPort=35155, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245) 2024-11-07T12:29:36,739 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4b5cebac7d7d3df0 with lease ID 0x9849c9739aa860c0: from storage DS-b201ff21-27e5-4c46-a980-e9a151a4b43d node DatanodeRegistration(127.0.0.1:35157, datanodeUuid=9af17079-0b3c-4990-8475-04b6d91c1138, infoPort=36961, infoSecurePort=0, ipcPort=35155, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:29:36,740 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4b5cebac7d7d3df0 with lease ID 0x9849c9739aa860c0: Processing first storage report for DS-e22365c8-5dce-4949-a4f6-5c833a592c19 from datanode DatanodeRegistration(127.0.0.1:35157, datanodeUuid=9af17079-0b3c-4990-8475-04b6d91c1138, infoPort=36961, infoSecurePort=0, ipcPort=35155, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245) 2024-11-07T12:29:36,740 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4b5cebac7d7d3df0 with lease ID 0x9849c9739aa860c0: from storage DS-e22365c8-5dce-4949-a4f6-5c833a592c19 node DatanodeRegistration(127.0.0.1:35157, datanodeUuid=9af17079-0b3c-4990-8475-04b6d91c1138, infoPort=36961, infoSecurePort=0, ipcPort=35155, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:29:36,760 WARN [ResponseProcessor for block BP-1009609649-172.17.0.3-1730982564245:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1009609649-172.17.0.3-1730982564245:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:36,760 WARN [ResponseProcessor for block BP-1009609649-172.17.0.3-1730982564245:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1009609649-172.17.0.3-1730982564245:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:36,760 WARN [ResponseProcessor for block BP-1009609649-172.17.0.3-1730982564245:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1009609649-172.17.0.3-1730982564245:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:36,761 WARN [ResponseProcessor for block BP-1009609649-172.17.0.3-1730982564245:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1009609649-172.17.0.3-1730982564245:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1009609649-172.17.0.3-1730982564245:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:36,761 WARN [DataStreamer for file /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta block BP-1009609649-172.17.0.3-1730982564245:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK], DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK]) is bad. 2024-11-07T12:29:36,761 WARN [DataStreamer for file /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 block BP-1009609649-172.17.0.3-1730982564245:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK], DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK]) is bad. 
2024-11-07T12:29:36,761 WARN [DataStreamer for file /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 block BP-1009609649-172.17.0.3-1730982564245:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK], DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK]) is bad. 2024-11-07T12:29:36,761 WARN [DataStreamer for file /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/WALs/87fb68174c52,41297,1730982564944/87fb68174c52%2C41297%2C1730982564944.1730982565099 block BP-1009609649-172.17.0.3-1730982564245:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK], DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK]) is bad. 2024-11-07T12:29:36,762 WARN [PacketResponder: BP-1009609649-172.17.0.3-1730982564245:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41511] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:29:36,762 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:50476 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41511:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50476 dst: /127.0.0.1:41511 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:36,762 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1559346319_22 at /127.0.0.1:50428 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41511:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50428 dst: /127.0.0.1:41511 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:36,762 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:40820 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36671:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40820 dst: /127.0.0.1:36671 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:29:36,762 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:40808 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36671:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40808 dst: /127.0.0.1:36671 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:36,763 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1194548409_22 at /127.0.0.1:50502 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:41511:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50502 dst: /127.0.0.1:41511 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:36,763 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1194548409_22 at /127.0.0.1:40844 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:36671:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40844 dst: /127.0.0.1:36671 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:29:36,763 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:50468 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41511:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50468 dst: /127.0.0.1:41511 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:36,764 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7330fb3f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:29:36,764 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1559346319_22 at /127.0.0.1:40786 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36671:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40786 dst: /127.0.0.1:36671 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:36,764 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20aa2ea7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:29:36,764 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:29:36,765 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ec1a06e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:29:36,765 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7eee535{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/hadoop.log.dir/,STOPPED} 2024-11-07T12:29:36,768 WARN [BP-1009609649-172.17.0.3-1730982564245 heartbeating to localhost/127.0.0.1:39665 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:29:36,768 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-07T12:29:36,768 WARN [BP-1009609649-172.17.0.3-1730982564245 heartbeating to localhost/127.0.0.1:39665 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1009609649-172.17.0.3-1730982564245 (Datanode Uuid e50dee72-73bb-452a-a5d4-1fb4c3bf8057) service to localhost/127.0.0.1:39665 2024-11-07T12:29:36,768 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:29:36,768 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data3/current/BP-1009609649-172.17.0.3-1730982564245 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:29:36,769 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data4/current/BP-1009609649-172.17.0.3-1730982564245 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:29:36,769 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:29:36,769 WARN [DataStreamer for file /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta block BP-1009609649-172.17.0.3-1730982564245:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:29:36,770 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@7512e08c {}] datanode.DataXceiver(331): 127.0.0.1:36671:DataXceiver error processing unknown operation src: /127.0.0.1:54932 dst: /127.0.0.1:36671 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:36,770 WARN [DataStreamer for file /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/WALs/87fb68174c52,41297,1730982564944/87fb68174c52%2C41297%2C1730982564944.1730982565099 block BP-1009609649-172.17.0.3-1730982564245:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:36,771 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:54908 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36671:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54908 dst: /127.0.0.1:36671 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:36,772 WARN [ResponseProcessor for block BP-1009609649-172.17.0.3-1730982564245:blk_1073741833_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1009609649-172.17.0.3-1730982564245:blk_1073741833_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:36,772 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1194548409_22 at /127.0.0.1:54918 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:36671:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54918 dst: /127.0.0.1:36671 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:36,773 WARN [ResponseProcessor for block BP-1009609649-172.17.0.3-1730982564245:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1009609649-172.17.0.3-1730982564245:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:36,775 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@14d09ab9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:29:36,775 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@ee6b493{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:29:36,775 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:29:36,775 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@48743db4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:29:36,775 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a107105{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/hadoop.log.dir/,STOPPED} 2024-11-07T12:29:36,776 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-07T12:29:36,776 WARN [BP-1009609649-172.17.0.3-1730982564245 heartbeating to localhost/127.0.0.1:39665 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:29:36,776 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:29:36,776 WARN [BP-1009609649-172.17.0.3-1730982564245 heartbeating to localhost/127.0.0.1:39665 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1009609649-172.17.0.3-1730982564245 (Datanode Uuid b39723c6-9828-464f-9004-aeee40a8d52a) service to localhost/127.0.0.1:39665 2024-11-07T12:29:36,777 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data1/current/BP-1009609649-172.17.0.3-1730982564245 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:29:36,777 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data2/current/BP-1009609649-172.17.0.3-1730982564245 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:29:36,777 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:29:36,781 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c., hostname=87fb68174c52,45061,1730982564994, seqNum=2] 2024-11-07T12:29:36,783 ERROR 
[FSHLog-0-hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c-prefix:87fb68174c52,45061,1730982564994 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:36,783 WARN [FSHLog-0-hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c-prefix:87fb68174c52,45061,1730982564994 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:36,783 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:29:36,783 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87fb68174c52%2C45061%2C1730982564994:(num 1730982565406) roll requested 2024-11-07T12:29:36,784 INFO [regionserver/87fb68174c52:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C45061%2C1730982564994.1730982576784 2024-11-07T12:29:36,789 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:36,790 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:36,790 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:36,790 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:36,790 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:36,790 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982576784 2024-11-07T12:29:36,790 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:36,791 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:29:36,791 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-07T12:29:36,792 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-07T12:29:36,792 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 2024-11-07T12:29:36,793 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44107:44107),(127.0.0.1/127.0.0.1:36961:36961)] 2024-11-07T12:29:36,793 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 is not closed yet, will try archiving it next time 2024-11-07T12:29:36,794 WARN [IPC Server handler 3 on default port 39665 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1015 2024-11-07T12:29:36,798 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 after 4ms 2024-11-07T12:29:37,076 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:38,047 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
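The lease-recovery sequence above (RecoverLeaseFSUtils, "Failed to recover lease, attempt=0 ... after 4ms", then later attempts a few seconds apart) amounts to repeatedly asking the namenode to recover the lease on the abandoned WAL until the file is actually closed. A rough sketch of that loop, assuming the caller already holds a DistributedFileSystem handle; names and timings are illustrative, not the utility's exact code:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
        // Poll recoverLease() until the namenode reports the file closed, or give up.
        public static boolean recoverLease(DistributedFileSystem dfs, Path wal,
                                           long timeoutMs) throws Exception {
            long deadline = System.currentTimeMillis() + timeoutMs;
            // recoverLease() returns true once the file is closed and its last block
            // finalized; until then each call only nudges recovery along.
            while (!dfs.recoverLease(wal)) {
                if (System.currentTimeMillis() > deadline) {
                    return false; // caller decides whether to retry or force the issue
                }
                Thread.sleep(4000L); // the utility above spaces attempts roughly 4s apart
            }
            return true;
        }
    }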
2024-11-07T12:29:38,793 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:38,794 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982576784 2024-11-07T12:29:38,795 WARN [ResponseProcessor for block BP-1009609649-172.17.0.3-1730982564245:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1009609649-172.17.0.3-1730982564245:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:38,795 WARN [DataStreamer for file /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982576784 block BP-1009609649-172.17.0.3-1730982564245:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK], DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK]) is bad. 2024-11-07T12:29:38,796 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:45202 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:44341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45202 dst: /127.0.0.1:44341 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:38,796 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:34468 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:35157:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34468 dst: /127.0.0.1:35157 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:29:38,797 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2047cbbb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:29:38,797 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2021586{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:29:38,797 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:29:38,797 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ff5703b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:29:38,798 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d0f4a9a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/hadoop.log.dir/,STOPPED} 2024-11-07T12:29:38,799 WARN [BP-1009609649-172.17.0.3-1730982564245 heartbeating to localhost/127.0.0.1:39665 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:29:38,799 WARN [BP-1009609649-172.17.0.3-1730982564245 heartbeating to localhost/127.0.0.1:39665 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1009609649-172.17.0.3-1730982564245 (Datanode Uuid a3aed02e-58a3-486f-9bf5-945900a9de8a) service to localhost/127.0.0.1:39665 2024-11-07T12:29:38,799 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-07T12:29:38,799 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:29:38,800 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data5/current/BP-1009609649-172.17.0.3-1730982564245 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:29:38,800 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data6/current/BP-1009609649-172.17.0.3-1730982564245 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:29:38,800 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:29:39,077 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:40,047 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:40,794 WARN [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK]] 2024-11-07T12:29:40,794 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
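The WARN at FSHLog(529) ("Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL") is the region server's own low-replication check: when the WAL's current pipeline has fewer replicas than it tolerates, it asks for a roll onto a fresh pipeline rather than keep writing under-replicated edits. The thresholds are configurable; the key names below are stated as an assumption about the FSHLog implementation (verify against your HBase version), and the values are examples only:

    import org.apache.hadoop.conf.Configuration;

    public class WalLowReplicationTuning {
        // Sketch under the assumption these are the FSHLog keys; values illustrative.
        public static Configuration regionServerConf() {
            Configuration conf = new Configuration();
            // Request a WAL roll once the live pipeline drops below this many replicas.
            conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
            // Give up requesting further low-replication rolls after this many in a row,
            // so a permanently shrunken cluster does not roll endlessly.
            conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
            return conf;
        }
    }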
2024-11-07T12:29:40,794 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87fb68174c52%2C45061%2C1730982564994:(num 1730982576784) roll requested 2024-11-07T12:29:40,795 INFO [regionserver/87fb68174c52:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C45061%2C1730982564994.1730982580794 2024-11-07T12:29:40,799 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 after 4006ms 2024-11-07T12:29:40,801 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:40,801 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:40,801 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:40,801 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:40,801 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:40,801 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982576784 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982580794 2024-11-07T12:29:40,803 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36961:36961),(127.0.0.1/127.0.0.1:44117:44117)] 2024-11-07T12:29:40,803 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 is not closed yet, will try archiving it next time 2024-11-07T12:29:40,803 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982576784 is not closed yet, will try archiving it next time 2024-11-07T12:29:40,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35157 is added to blk_1073741838_1020 (size=2431) 2024-11-07T12:29:40,805 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T12:29:41,077 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:29:41,204 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 is not closed yet, will try archiving it next time 2024-11-07T12:29:42,048 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:42,751 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3fd76530[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35157, datanodeUuid=9af17079-0b3c-4990-8475-04b6d91c1138, infoPort=36961, infoSecurePort=0, ipcPort=35155, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245):Failed to transfer BP-1009609649-172.17.0.3-1730982564245:blk_1073741838_1020 to 127.0.0.1:41511 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:42,803 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:29:42,809 WARN [ResponseProcessor for block BP-1009609649-172.17.0.3-1730982564245:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1009609649-172.17.0.3-1730982564245:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:42,809 WARN [DataStreamer for file /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982580794 block BP-1009609649-172.17.0.3-1730982564245:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK], DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK]) is bad. 2024-11-07T12:29:42,809 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:53350 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:35157:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53350 dst: /127.0.0.1:35157 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:42,810 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:44740 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:37967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44740 dst: /127.0.0.1:37967 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:29:42,811 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30add41a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:29:42,811 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3fd17220{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:29:42,811 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:29:42,811 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1dc59954{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:29:42,811 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50ff2063{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/hadoop.log.dir/,STOPPED} 2024-11-07T12:29:42,813 WARN [BP-1009609649-172.17.0.3-1730982564245 heartbeating to localhost/127.0.0.1:39665 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:29:42,813 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-07T12:29:42,813 WARN [BP-1009609649-172.17.0.3-1730982564245 heartbeating to localhost/127.0.0.1:39665 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1009609649-172.17.0.3-1730982564245 (Datanode Uuid 9af17079-0b3c-4990-8475-04b6d91c1138) service to localhost/127.0.0.1:39665 2024-11-07T12:29:42,813 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:29:42,813 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data9/current/BP-1009609649-172.17.0.3-1730982564245 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:29:42,814 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data10/current/BP-1009609649-172.17.0.3-1730982564245 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:29:42,814 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:29:42,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45061 {}] regionserver.HRegion(8855): Flush requested on a522e52eecdb10e9bf4c387c36fbbb6c 2024-11-07T12:29:42,822 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a522e52eecdb10e9bf4c387c36fbbb6c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-07T12:29:42,839 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/ab341a45c7c94e09b42cfb56965bb998 is 1080, key is row0002/info:/1730982578801/Put/seqid=0 2024-11-07T12:29:42,841 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:42,841 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741840_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK], DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK]) is bad. 2024-11-07T12:29:42,841 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741840_1023 2024-11-07T12:29:42,844 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK] 2024-11-07T12:29:42,847 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1024 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:42,847 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741841_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK], DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK]) is bad. 
2024-11-07T12:29:42,847 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741841_1024 2024-11-07T12:29:42,848 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK] 2024-11-07T12:29:42,849 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:42,849 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK], DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]) is bad. 2024-11-07T12:29:42,849 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741842_1025 2024-11-07T12:29:42,849 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK] 2024-11-07T12:29:42,851 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44341 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:29:42,851 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:44760 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741843_1026] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data8]'}, localName='127.0.0.1:37967', datanodeUuid='ec31049c-c9de-4285-9e6d-5a8b30ce0dd0', xmitsInProgress=0}:Exception transferring block BP-1009609649-172.17.0.3-1730982564245:blk_1073741843_1026 to mirror 127.0.0.1:44341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:42,851 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK], DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK]) is bad. 2024-11-07T12:29:42,851 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741843_1026 2024-11-07T12:29:42,851 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:44760 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741843_1026] {}] datanode.BlockReceiver(316): Block 1073741843 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-07T12:29:42,852 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:44760 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741843_1026] {}] datanode.DataXceiver(331): 127.0.0.1:37967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44760 dst: /127.0.0.1:37967 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:42,852 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK] 2024-11-07T12:29:42,852 WARN [IPC Server handler 4 on default port 39665 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-07T12:29:42,853 WARN [IPC Server handler 4 on default port 39665 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-07T12:29:42,853 WARN [IPC Server handler 4 on default port 39665 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-07T12:29:42,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741844_1027 (size=10347) 2024-11-07T12:29:43,077 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:29:43,256 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/ab341a45c7c94e09b42cfb56965bb998 2024-11-07T12:29:43,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/ab341a45c7c94e09b42cfb56965bb998 as hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/ab341a45c7c94e09b42cfb56965bb998 2024-11-07T12:29:43,269 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/ab341a45c7c94e09b42cfb56965bb998, entries=5, sequenceid=11, filesize=10.1 K 2024-11-07T12:29:43,270 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for a522e52eecdb10e9bf4c387c36fbbb6c in 448ms, sequenceid=11, compaction requested=false 2024-11-07T12:29:43,270 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a522e52eecdb10e9bf4c387c36fbbb6c: 2024-11-07T12:29:43,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45061 {}] regionserver.HRegion(8855): Flush requested on a522e52eecdb10e9bf4c387c36fbbb6c 2024-11-07T12:29:43,443 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a522e52eecdb10e9bf4c387c36fbbb6c 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-07T12:29:43,447 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/2199b9d7937e4bed8777da169958fcb4 is 1080, key is row0007/info:/1730982582823/Put/seqid=0 2024-11-07T12:29:43,449 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
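The flush above follows the normal memstore path: the edits are written to an HFile under .tmp, the file is committed into the column family directory, and the store reports "entries=5, sequenceid=11, filesize=10.1 K". The same flush can be forced from a client through the Admin API; a minimal sketch, with the table name taken from the log and the connection setup left to defaults:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ForceFlushSketch {
        // Sketch: ask the region server to flush a table's memstores to HFiles,
        // i.e. trigger on demand what MemStoreFlusher is doing above on its own.
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
            }
        }
    }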
2024-11-07T12:29:43,449 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK], DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]) is bad. 2024-11-07T12:29:43,449 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741845_1028 2024-11-07T12:29:43,450 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK] 2024-11-07T12:29:43,452 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35157 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:43,452 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:44786 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data8]'}, localName='127.0.0.1:37967', datanodeUuid='ec31049c-c9de-4285-9e6d-5a8b30ce0dd0', xmitsInProgress=0}:Exception transferring block BP-1009609649-172.17.0.3-1730982564245:blk_1073741846_1029 to mirror 127.0.0.1:35157 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:29:43,452 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK], DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK]) is bad. 2024-11-07T12:29:43,452 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741846_1029 2024-11-07T12:29:43,452 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:44786 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-07T12:29:43,452 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:44786 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:37967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44786 dst: /127.0.0.1:37967 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:43,453 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK] 2024-11-07T12:29:43,455 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:44798 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741847_1030] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data8]'}, localName='127.0.0.1:37967', datanodeUuid='ec31049c-c9de-4285-9e6d-5a8b30ce0dd0', xmitsInProgress=0}:Exception transferring block BP-1009609649-172.17.0.3-1730982564245:blk_1073741847_1030 to mirror 127.0.0.1:44341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:43,455 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44341 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:43,455 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:44798 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741847_1030] {}] datanode.BlockReceiver(316): Block 1073741847 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-07T12:29:43,455 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK], DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK]) is bad. 2024-11-07T12:29:43,455 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741847_1030 2024-11-07T12:29:43,455 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:44798 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741847_1030] {}] datanode.DataXceiver(331): 127.0.0.1:37967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44798 dst: /127.0.0.1:37967 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:43,456 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK] 2024-11-07T12:29:43,458 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41511 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:43,457 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:44810 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741848_1031] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data8]'}, localName='127.0.0.1:37967', datanodeUuid='ec31049c-c9de-4285-9e6d-5a8b30ce0dd0', xmitsInProgress=0}:Exception transferring block BP-1009609649-172.17.0.3-1730982564245:blk_1073741848_1031 to mirror 127.0.0.1:41511 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:29:43,458 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK], DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK]) is bad. 2024-11-07T12:29:43,458 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741848_1031 2024-11-07T12:29:43,458 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:44810 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741848_1031] {}] datanode.BlockReceiver(316): Block 1073741848 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-07T12:29:43,458 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:44810 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741848_1031] {}] datanode.DataXceiver(331): 127.0.0.1:37967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44810 dst: /127.0.0.1:37967 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:29:43,458 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK] 2024-11-07T12:29:43,459 WARN [IPC Server handler 4 on default port 39665 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-07T12:29:43,459 WARN [IPC Server handler 4 on default port 39665 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-07T12:29:43,459 WARN [IPC Server handler 4 on default port 39665 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-07T12:29:43,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741849_1032 (size=12506) 2024-11-07T12:29:43,863 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/2199b9d7937e4bed8777da169958fcb4 2024-11-07T12:29:43,870 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/2199b9d7937e4bed8777da169958fcb4 as hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/2199b9d7937e4bed8777da169958fcb4 2024-11-07T12:29:43,875 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/2199b9d7937e4bed8777da169958fcb4, entries=7, sequenceid=24, filesize=12.2 K 2024-11-07T12:29:43,876 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for a522e52eecdb10e9bf4c387c36fbbb6c in 434ms, sequenceid=24, compaction requested=false 2024-11-07T12:29:43,876 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a522e52eecdb10e9bf4c387c36fbbb6c: 2024-11-07T12:29:43,877 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should 
split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-07T12:29:43,877 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:29:43,877 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/2199b9d7937e4bed8777da169958fcb4 because midkey is the same as first or last row 2024-11-07T12:29:44,048 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:44,803 WARN [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK]] 2024-11-07T12:29:44,803 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:44,803 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87fb68174c52%2C45061%2C1730982564994:(num 1730982580794) roll requested 2024-11-07T12:29:44,804 INFO [regionserver/87fb68174c52:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C45061%2C1730982564994.1730982584804 2024-11-07T12:29:44,807 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:44,807 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK], DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK]) is bad. 2024-11-07T12:29:44,807 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741850_1033 2024-11-07T12:29:44,807 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK] 2024-11-07T12:29:44,808 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:44,809 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK], DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]) is bad. 2024-11-07T12:29:44,809 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741851_1034 2024-11-07T12:29:44,809 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK] 2024-11-07T12:29:44,810 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:44,810 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK], DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK]) is bad. 2024-11-07T12:29:44,810 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741852_1035 2024-11-07T12:29:44,811 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK] 2024-11-07T12:29:44,812 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:44,812 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK], DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK]) is bad. 
2024-11-07T12:29:44,812 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741853_1036 2024-11-07T12:29:44,813 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK] 2024-11-07T12:29:44,813 WARN [IPC Server handler 3 on default port 39665 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-07T12:29:44,813 WARN [IPC Server handler 3 on default port 39665 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-07T12:29:44,813 WARN [IPC Server handler 3 on default port 39665 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-07T12:29:44,816 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:44,816 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:44,816 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:44,816 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:44,816 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:44,816 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982580794 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982584804 2024-11-07T12:29:44,817 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44117:44117)] 2024-11-07T12:29:44,817 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 is not closed yet, will try archiving it next time 2024-11-07T12:29:44,817 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982580794 is not closed yet, will try archiving it next time 2024-11-07T12:29:44,818 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982576784 to hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/oldWALs/87fb68174c52%2C45061%2C1730982564994.1730982576784 2024-11-07T12:29:44,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741839_1022 (size=25992) 2024-11-07T12:29:44,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45061 {}] regionserver.HRegion(8855): Flush requested on a522e52eecdb10e9bf4c387c36fbbb6c 2024-11-07T12:29:44,860 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a522e52eecdb10e9bf4c387c36fbbb6c 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-07T12:29:44,865 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/f78819e18bcd4076b306a86416f770c4 is 1079, key is tmprow/info:/1730982584859/Put/seqid=0 2024-11-07T12:29:44,866 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:44,866 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK], DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]) is bad. 2024-11-07T12:29:44,866 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741855_1038 2024-11-07T12:29:44,867 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK] 2024-11-07T12:29:44,868 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:44,868 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK], DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK]) is bad. 2024-11-07T12:29:44,868 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741856_1039 2024-11-07T12:29:44,869 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK] 2024-11-07T12:29:44,871 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35157 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:44,871 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:44830 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741857_1040] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data8]'}, localName='127.0.0.1:37967', datanodeUuid='ec31049c-c9de-4285-9e6d-5a8b30ce0dd0', xmitsInProgress=0}:Exception transferring block BP-1009609649-172.17.0.3-1730982564245:blk_1073741857_1040 to mirror 127.0.0.1:35157 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:44,871 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK], DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK]) is bad. 2024-11-07T12:29:44,871 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741857_1040 2024-11-07T12:29:44,871 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:44830 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741857_1040] {}] datanode.BlockReceiver(316): Block 1073741857 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-07T12:29:44,871 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:44830 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741857_1040] {}] datanode.DataXceiver(331): 127.0.0.1:37967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44830 dst: /127.0.0.1:37967 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:44,871 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK] 2024-11-07T12:29:44,873 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44341 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:44,873 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:44840 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741858_1041] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data8]'}, localName='127.0.0.1:37967', datanodeUuid='ec31049c-c9de-4285-9e6d-5a8b30ce0dd0', xmitsInProgress=0}:Exception transferring block BP-1009609649-172.17.0.3-1730982564245:blk_1073741858_1041 to mirror 127.0.0.1:44341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:44,873 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK], DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK]) is bad. 2024-11-07T12:29:44,873 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741858_1041 2024-11-07T12:29:44,873 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:44840 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741858_1041] {}] datanode.BlockReceiver(316): Block 1073741858 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-11-07T12:29:44,873 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:44840 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741858_1041] {}] datanode.DataXceiver(331): 127.0.0.1:37967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44840 dst: /127.0.0.1:37967 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:44,874 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK] 2024-11-07T12:29:44,874 WARN [IPC Server handler 4 on default port 39665 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-07T12:29:44,875 WARN [IPC Server handler 4 on default port 39665 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-07T12:29:44,875 WARN [IPC Server handler 4 on default port 39665 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-07T12:29:44,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741859_1042 (size=6027) 2024-11-07T12:29:45,078 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:45,219 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 is not closed yet, will try archiving it next time 2024-11-07T12:29:45,278 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/f78819e18bcd4076b306a86416f770c4 2024-11-07T12:29:45,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/f78819e18bcd4076b306a86416f770c4 as hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/f78819e18bcd4076b306a86416f770c4 2024-11-07T12:29:45,292 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/f78819e18bcd4076b306a86416f770c4, entries=1, sequenceid=34, filesize=5.9 K 2024-11-07T12:29:45,293 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for a522e52eecdb10e9bf4c387c36fbbb6c in 433ms, sequenceid=34, compaction requested=true 2024-11-07T12:29:45,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a522e52eecdb10e9bf4c387c36fbbb6c: 2024-11-07T12:29:45,293 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-07T12:29:45,293 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:29:45,293 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/2199b9d7937e4bed8777da169958fcb4 because midkey is the same as first or last row 2024-11-07T12:29:45,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a522e52eecdb10e9bf4c387c36fbbb6c:info, priority=-2147483648, current under compaction store size is 1 2024-11-07T12:29:45,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
2024-11-07T12:29:45,294 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T12:29:45,295 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T12:29:45,295 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.HStore(1541): a522e52eecdb10e9bf4c387c36fbbb6c/info is initiating minor compaction (all files) 2024-11-07T12:29:45,295 INFO [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a522e52eecdb10e9bf4c387c36fbbb6c/info in TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c. 2024-11-07T12:29:45,295 INFO [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/ab341a45c7c94e09b42cfb56965bb998, hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/2199b9d7937e4bed8777da169958fcb4, hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/f78819e18bcd4076b306a86416f770c4] into tmpdir=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp, totalSize=28.2 K 2024-11-07T12:29:45,296 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] compactions.Compactor(225): Compacting ab341a45c7c94e09b42cfb56965bb998, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1730982578801 2024-11-07T12:29:45,296 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2199b9d7937e4bed8777da169958fcb4, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1730982582823 2024-11-07T12:29:45,296 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] compactions.Compactor(225): Compacting f78819e18bcd4076b306a86416f770c4, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1730982584859 2024-11-07T12:29:45,309 INFO [RS:0;87fb68174c52:45061-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a522e52eecdb10e9bf4c387c36fbbb6c#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:29:45,310 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/61f3fe28946e478a80516f2c5b2d976e is 1080, key is row0002/info:/1730982578801/Put/seqid=0 2024-11-07T12:29:45,311 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:45,312 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK], DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]) is bad. 2024-11-07T12:29:45,312 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741860_1043 2024-11-07T12:29:45,312 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK] 2024-11-07T12:29:45,313 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:29:45,313 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK], DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK]) is bad. 2024-11-07T12:29:45,313 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741861_1044 2024-11-07T12:29:45,314 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK] 2024-11-07T12:29:45,315 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41511 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:45,315 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:44886 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741862_1045] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data8]'}, localName='127.0.0.1:37967', datanodeUuid='ec31049c-c9de-4285-9e6d-5a8b30ce0dd0', xmitsInProgress=0}:Exception transferring block BP-1009609649-172.17.0.3-1730982564245:blk_1073741862_1045 to mirror 127.0.0.1:41511 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:29:45,316 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK], DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK]) is bad. 2024-11-07T12:29:45,316 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741862_1045 2024-11-07T12:29:45,316 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:44886 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741862_1045] {}] datanode.BlockReceiver(316): Block 1073741862 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-07T12:29:45,316 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:44886 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741862_1045] {}] datanode.DataXceiver(331): 127.0.0.1:37967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44886 dst: /127.0.0.1:37967 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:45,316 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK] 2024-11-07T12:29:45,317 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:29:45,317 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK], DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK]) is bad. 2024-11-07T12:29:45,317 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741863_1046 2024-11-07T12:29:45,318 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK] 2024-11-07T12:29:45,318 WARN [IPC Server handler 1 on default port 39665 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-07T12:29:45,318 WARN [IPC Server handler 1 on default port 39665 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-07T12:29:45,318 WARN [IPC Server handler 1 on default port 39665 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-07T12:29:45,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741864_1047 (size=17994) 2024-11-07T12:29:45,583 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3abe0717[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37967, datanodeUuid=ec31049c-c9de-4285-9e6d-5a8b30ce0dd0, infoPort=44117, infoSecurePort=0, ipcPort=35033, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245):Failed to transfer BP-1009609649-172.17.0.3-1730982564245:blk_1073741844_1027 to 127.0.0.1:35157 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:45,583 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3479fe30[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37967, datanodeUuid=ec31049c-c9de-4285-9e6d-5a8b30ce0dd0, infoPort=44117, infoSecurePort=0, ipcPort=35033, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245):Failed to transfer BP-1009609649-172.17.0.3-1730982564245:blk_1073741849_1032 to 127.0.0.1:36671 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:45,728 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/61f3fe28946e478a80516f2c5b2d976e as hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/61f3fe28946e478a80516f2c5b2d976e 2024-11-07T12:29:45,734 INFO [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a522e52eecdb10e9bf4c387c36fbbb6c/info of a522e52eecdb10e9bf4c387c36fbbb6c into 61f3fe28946e478a80516f2c5b2d976e(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T12:29:45,734 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a522e52eecdb10e9bf4c387c36fbbb6c: 2024-11-07T12:29:45,734 INFO [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c., storeName=a522e52eecdb10e9bf4c387c36fbbb6c/info, priority=13, startTime=1730982585293; duration=0sec 2024-11-07T12:29:45,734 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-07T12:29:45,734 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:29:45,735 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/61f3fe28946e478a80516f2c5b2d976e because midkey is the same as first or last row 2024-11-07T12:29:45,735 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-07T12:29:45,735 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:29:45,735 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/61f3fe28946e478a80516f2c5b2d976e because midkey is the same as first or last row 2024-11-07T12:29:45,735 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-07T12:29:45,735 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:29:45,735 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/61f3fe28946e478a80516f2c5b2d976e because midkey is the same as first or last row 2024-11-07T12:29:45,735 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:29:45,735 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a522e52eecdb10e9bf4c387c36fbbb6c:info 2024-11-07T12:29:46,048 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:46,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45061 {}] regionserver.HRegion(8855): Flush requested on a522e52eecdb10e9bf4c387c36fbbb6c 2024-11-07T12:29:46,278 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a522e52eecdb10e9bf4c387c36fbbb6c 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-07T12:29:46,283 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/139453e68d094ba783c06ebafbe9c171 is 1079, key is tmprow/info:/1730982586277/Put/seqid=0 2024-11-07T12:29:46,284 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:46,284 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK], DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK]) is bad. 2024-11-07T12:29:46,285 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741865_1048 2024-11-07T12:29:46,285 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK] 2024-11-07T12:29:46,286 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:46,286 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK], DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK]) is bad. 2024-11-07T12:29:46,286 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741866_1049 2024-11-07T12:29:46,287 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK] 2024-11-07T12:29:46,288 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:46,288 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK], DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK]) is bad. 2024-11-07T12:29:46,288 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741867_1050 2024-11-07T12:29:46,288 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK] 2024-11-07T12:29:46,289 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:46,289 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK], DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]) is bad. 2024-11-07T12:29:46,289 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741868_1051 2024-11-07T12:29:46,290 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK] 2024-11-07T12:29:46,290 WARN [IPC Server handler 3 on default port 39665 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-07T12:29:46,290 WARN [IPC Server handler 3 on default port 39665 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-07T12:29:46,290 WARN [IPC Server handler 3 on default port 39665 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-07T12:29:46,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741869_1052 (size=6027) 2024-11-07T12:29:46,583 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3abe0717[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37967, datanodeUuid=ec31049c-c9de-4285-9e6d-5a8b30ce0dd0, infoPort=44117, infoSecurePort=0, ipcPort=35033, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245):Failed to transfer BP-1009609649-172.17.0.3-1730982564245:blk_1073741859_1042 to 127.0.0.1:36671 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:46,583 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3479fe30[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37967, datanodeUuid=ec31049c-c9de-4285-9e6d-5a8b30ce0dd0, infoPort=44117, infoSecurePort=0, ipcPort=35033, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245):Failed to transfer BP-1009609649-172.17.0.3-1730982564245:blk_1073741839_1022 to 127.0.0.1:35157 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:29:46,694 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/139453e68d094ba783c06ebafbe9c171 2024-11-07T12:29:46,701 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/139453e68d094ba783c06ebafbe9c171 as hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/139453e68d094ba783c06ebafbe9c171 2024-11-07T12:29:46,706 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/139453e68d094ba783c06ebafbe9c171, entries=1, sequenceid=45, filesize=5.9 K 2024-11-07T12:29:46,707 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for a522e52eecdb10e9bf4c387c36fbbb6c in 429ms, sequenceid=45, compaction requested=false 2024-11-07T12:29:46,707 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a522e52eecdb10e9bf4c387c36fbbb6c: 2024-11-07T12:29:46,707 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-07T12:29:46,707 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:29:46,707 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/61f3fe28946e478a80516f2c5b2d976e because midkey is the same as first or last row 2024-11-07T12:29:46,818 WARN [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK]] 2024-11-07T12:29:46,818 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:29:46,818 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87fb68174c52%2C45061%2C1730982564994:(num 1730982584804) roll requested 2024-11-07T12:29:46,818 INFO [regionserver/87fb68174c52:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C45061%2C1730982564994.1730982586818 2024-11-07T12:29:46,822 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44341 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:46,822 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:38970 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741870_1053] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data8]'}, localName='127.0.0.1:37967', datanodeUuid='ec31049c-c9de-4285-9e6d-5a8b30ce0dd0', xmitsInProgress=0}:Exception transferring block BP-1009609649-172.17.0.3-1730982564245:blk_1073741870_1053 to mirror 127.0.0.1:44341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:29:46,822 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK], DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK]) is bad. 2024-11-07T12:29:46,822 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741870_1053 2024-11-07T12:29:46,822 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:38970 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741870_1053] {}] datanode.BlockReceiver(316): Block 1073741870 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-07T12:29:46,822 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:38970 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741870_1053] {}] datanode.DataXceiver(331): 127.0.0.1:37967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38970 dst: /127.0.0.1:37967 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:46,823 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK] 2024-11-07T12:29:46,824 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35157 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:29:46,824 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:38980 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data8]'}, localName='127.0.0.1:37967', datanodeUuid='ec31049c-c9de-4285-9e6d-5a8b30ce0dd0', xmitsInProgress=0}:Exception transferring block BP-1009609649-172.17.0.3-1730982564245:blk_1073741871_1054 to mirror 127.0.0.1:35157 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:46,825 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK], DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK]) is bad. 2024-11-07T12:29:46,825 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741871_1054 2024-11-07T12:29:46,825 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:38980 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-07T12:29:46,825 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:38980 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:37967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38980 dst: /127.0.0.1:37967 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:46,825 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK] 2024-11-07T12:29:46,826 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:46,826 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK], DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK]) is bad. 2024-11-07T12:29:46,826 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741872_1055 2024-11-07T12:29:46,827 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK] 2024-11-07T12:29:46,828 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:46,828 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK], DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]) is bad. 2024-11-07T12:29:46,828 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741873_1056 2024-11-07T12:29:46,828 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK] 2024-11-07T12:29:46,829 WARN [IPC Server handler 3 on default port 39665 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-07T12:29:46,829 WARN [IPC Server handler 3 on default port 39665 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-07T12:29:46,829 WARN [IPC Server handler 3 on default port 39665 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-07T12:29:46,831 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:46,831 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:46,831 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:46,831 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:46,832 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:46,832 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982584804 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982586818 2024-11-07T12:29:46,832 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44117:44117)] 2024-11-07T12:29:46,832 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 is not closed yet, will try archiving it next time 2024-11-07T12:29:46,832 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982584804 is not closed yet, will try archiving it next time 2024-11-07T12:29:46,833 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982580794 to hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/oldWALs/87fb68174c52%2C45061%2C1730982564994.1730982580794 2024-11-07T12:29:46,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741854_1037 (size=13591) 2024-11-07T12:29:47,078 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:47,234 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 is not closed yet, will try archiving it next time 2024-11-07T12:29:47,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45061 {}] regionserver.HRegion(8855): Flush requested on a522e52eecdb10e9bf4c387c36fbbb6c 2024-11-07T12:29:47,696 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a522e52eecdb10e9bf4c387c36fbbb6c 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-07T12:29:47,701 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/d86f2ec979584973a2fe06c2205867c2 is 1079, key is tmprow/info:/1730982587695/Put/seqid=0 2024-11-07T12:29:47,702 WARN [Thread-972 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:47,702 WARN [Thread-972 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK], DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK]) is bad. 2024-11-07T12:29:47,703 WARN [Thread-972 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741875_1058 2024-11-07T12:29:47,703 WARN [Thread-972 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK] 2024-11-07T12:29:47,704 WARN [Thread-972 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:47,704 WARN [Thread-972 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK], DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK]) is bad. 2024-11-07T12:29:47,704 WARN [Thread-972 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741876_1059 2024-11-07T12:29:47,705 WARN [Thread-972 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK] 2024-11-07T12:29:47,706 WARN [Thread-972 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:47,706 WARN [Thread-972 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK], DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK]) is bad. 2024-11-07T12:29:47,706 WARN [Thread-972 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741877_1060 2024-11-07T12:29:47,707 WARN [Thread-972 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK] 2024-11-07T12:29:47,708 WARN [Thread-972 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:47,708 WARN [Thread-972 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK], DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]) is bad. 
2024-11-07T12:29:47,708 WARN [Thread-972 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741878_1061 2024-11-07T12:29:47,708 WARN [Thread-972 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK] 2024-11-07T12:29:47,709 WARN [IPC Server handler 1 on default port 39665 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-07T12:29:47,709 WARN [IPC Server handler 1 on default port 39665 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-07T12:29:47,709 WARN [IPC Server handler 1 on default port 39665 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-07T12:29:47,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741879_1062 (size=6027) 2024-11-07T12:29:48,049 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:29:48,113 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/d86f2ec979584973a2fe06c2205867c2 2024-11-07T12:29:48,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/d86f2ec979584973a2fe06c2205867c2 as hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/d86f2ec979584973a2fe06c2205867c2 2024-11-07T12:29:48,124 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/d86f2ec979584973a2fe06c2205867c2, entries=1, sequenceid=55, filesize=5.9 K 2024-11-07T12:29:48,125 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for a522e52eecdb10e9bf4c387c36fbbb6c in 429ms, sequenceid=55, compaction requested=true 2024-11-07T12:29:48,125 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a522e52eecdb10e9bf4c387c36fbbb6c: 2024-11-07T12:29:48,125 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-07T12:29:48,125 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:29:48,125 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/61f3fe28946e478a80516f2c5b2d976e because midkey is the same as first or last row 2024-11-07T12:29:48,125 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a522e52eecdb10e9bf4c387c36fbbb6c:info, priority=-2147483648, current under compaction store size is 1 2024-11-07T12:29:48,125 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:29:48,125 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T12:29:48,126 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T12:29:48,127 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.HStore(1541): a522e52eecdb10e9bf4c387c36fbbb6c/info is initiating minor compaction (all files) 2024-11-07T12:29:48,127 INFO [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
a522e52eecdb10e9bf4c387c36fbbb6c/info in TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c. 2024-11-07T12:29:48,127 INFO [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/61f3fe28946e478a80516f2c5b2d976e, hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/139453e68d094ba783c06ebafbe9c171, hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/d86f2ec979584973a2fe06c2205867c2] into tmpdir=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp, totalSize=29.3 K 2024-11-07T12:29:48,127 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] compactions.Compactor(225): Compacting 61f3fe28946e478a80516f2c5b2d976e, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1730982578801 2024-11-07T12:29:48,127 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] compactions.Compactor(225): Compacting 139453e68d094ba783c06ebafbe9c171, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1730982586277 2024-11-07T12:29:48,128 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] compactions.Compactor(225): Compacting d86f2ec979584973a2fe06c2205867c2, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1730982587695 2024-11-07T12:29:48,141 INFO [RS:0;87fb68174c52:45061-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a522e52eecdb10e9bf4c387c36fbbb6c#info#compaction#24 average throughput is 12.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:29:48,142 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/b1d26e45cfa64fbf80ff32c7231e1a54 is 1080, key is row0002/info:/1730982578801/Put/seqid=0 2024-11-07T12:29:48,143 WARN [Thread-976 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:48,144 WARN [Thread-976 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK], DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK]) is bad. 2024-11-07T12:29:48,144 WARN [Thread-976 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741880_1063 2024-11-07T12:29:48,144 WARN [Thread-976 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK] 2024-11-07T12:29:48,146 WARN [Thread-976 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44341 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:48,146 WARN [Thread-976 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK], DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK]) is bad. 2024-11-07T12:29:48,146 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:39010 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741881_1064] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data8]'}, localName='127.0.0.1:37967', datanodeUuid='ec31049c-c9de-4285-9e6d-5a8b30ce0dd0', xmitsInProgress=0}:Exception transferring block BP-1009609649-172.17.0.3-1730982564245:blk_1073741881_1064 to mirror 127.0.0.1:44341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:48,146 WARN [Thread-976 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741881_1064 2024-11-07T12:29:48,146 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:39010 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741881_1064] {}] datanode.BlockReceiver(316): Block 1073741881 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-07T12:29:48,147 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:39010 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741881_1064] {}] datanode.DataXceiver(331): 127.0.0.1:37967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39010 dst: /127.0.0.1:37967 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:48,147 WARN [Thread-976 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK] 2024-11-07T12:29:48,148 WARN [Thread-976 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:29:48,148 WARN [Thread-976 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK], DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]) is bad. 2024-11-07T12:29:48,148 WARN [Thread-976 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741882_1065 2024-11-07T12:29:48,148 WARN [Thread-976 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK] 2024-11-07T12:29:48,150 WARN [Thread-976 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41511 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:48,150 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:39020 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741883_1066] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data8]'}, localName='127.0.0.1:37967', datanodeUuid='ec31049c-c9de-4285-9e6d-5a8b30ce0dd0', xmitsInProgress=0}:Exception transferring block BP-1009609649-172.17.0.3-1730982564245:blk_1073741883_1066 to mirror 127.0.0.1:41511 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:29:48,150 WARN [Thread-976 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK], DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK]) is bad. 2024-11-07T12:29:48,150 WARN [Thread-976 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741883_1066 2024-11-07T12:29:48,150 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:39020 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741883_1066] {}] datanode.BlockReceiver(316): Block 1073741883 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-07T12:29:48,150 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:39020 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741883_1066] {}] datanode.DataXceiver(331): 127.0.0.1:37967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39020 dst: /127.0.0.1:37967 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:29:48,151 WARN [Thread-976 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41511,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK] 2024-11-07T12:29:48,151 WARN [IPC Server handler 3 on default port 39665 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-07T12:29:48,151 WARN [IPC Server handler 3 on default port 39665 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-07T12:29:48,151 WARN [IPC Server handler 3 on default port 39665 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-07T12:29:48,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741884_1067 (size=18097) 2024-11-07T12:29:48,561 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/b1d26e45cfa64fbf80ff32c7231e1a54 as hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/b1d26e45cfa64fbf80ff32c7231e1a54 2024-11-07T12:29:48,567 INFO [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a522e52eecdb10e9bf4c387c36fbbb6c/info of a522e52eecdb10e9bf4c387c36fbbb6c into b1d26e45cfa64fbf80ff32c7231e1a54(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T12:29:48,568 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a522e52eecdb10e9bf4c387c36fbbb6c: 2024-11-07T12:29:48,568 INFO [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c., storeName=a522e52eecdb10e9bf4c387c36fbbb6c/info, priority=13, startTime=1730982588125; duration=0sec 2024-11-07T12:29:48,568 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-07T12:29:48,568 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:29:48,568 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/b1d26e45cfa64fbf80ff32c7231e1a54 because midkey is the same as first or last row 2024-11-07T12:29:48,568 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-07T12:29:48,568 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:29:48,568 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/b1d26e45cfa64fbf80ff32c7231e1a54 because midkey is the same as first or last row 2024-11-07T12:29:48,568 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-07T12:29:48,568 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:29:48,568 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/b1d26e45cfa64fbf80ff32c7231e1a54 because midkey is the same as first or last row 2024-11-07T12:29:48,568 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:29:48,568 DEBUG [RS:0;87fb68174c52:45061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a522e52eecdb10e9bf4c387c36fbbb6c:info 2024-11-07T12:29:48,583 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3abe0717[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37967, datanodeUuid=ec31049c-c9de-4285-9e6d-5a8b30ce0dd0, infoPort=44117, infoSecurePort=0, ipcPort=35033, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245):Failed to transfer 
BP-1009609649-172.17.0.3-1730982564245:blk_1073741869_1052 to 127.0.0.1:44341 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:48,583 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3479fe30[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37967, datanodeUuid=ec31049c-c9de-4285-9e6d-5a8b30ce0dd0, infoPort=44117, infoSecurePort=0, ipcPort=35033, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245):Failed to transfer BP-1009609649-172.17.0.3-1730982564245:blk_1073741864_1047 to 127.0.0.1:44341 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:48,833 WARN [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-07T12:29:48,833 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:48,918 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:29:48,920 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:29:48,921 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:29:48,921 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:29:48,921 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:29:48,922 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39eb7ad6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:29:48,922 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2aaad1eb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:29:49,036 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@e407f59{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/java.io.tmpdir/jetty-localhost-36041-hadoop-hdfs-3_4_1-tests_jar-_-any-2102875809283251550/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:29:49,037 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@413b5d87{HTTP/1.1, (http/1.1)}{localhost:36041} 2024-11-07T12:29:49,037 INFO [Time-limited test {}] server.Server(415): Started @127709ms 2024-11-07T12:29:49,038 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:29:49,078 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:49,124 WARN [Thread-996 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:29:49,128 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3372afb38439b40a with lease ID 0x9849c9739aa860c1: from storage DS-c263f208-3cd5-400f-be78-6d95bfd6cf86 node DatanodeRegistration(127.0.0.1:37921, datanodeUuid=e50dee72-73bb-452a-a5d4-1fb4c3bf8057, infoPort=38549, infoSecurePort=0, ipcPort=37467, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-07T12:29:49,128 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3372afb38439b40a with lease ID 0x9849c9739aa860c1: from storage DS-094b765d-ef82-45b9-ab89-493e766bd3c9 node DatanodeRegistration(127.0.0.1:37921, datanodeUuid=e50dee72-73bb-452a-a5d4-1fb4c3bf8057, infoPort=38549, infoSecurePort=0, ipcPort=37467, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:29:49,583 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3abe0717[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37967, datanodeUuid=ec31049c-c9de-4285-9e6d-5a8b30ce0dd0, infoPort=44117, infoSecurePort=0, ipcPort=35033, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245):Failed to transfer BP-1009609649-172.17.0.3-1730982564245:blk_1073741879_1062 to 127.0.0.1:35157 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:49,583 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3479fe30[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37967, datanodeUuid=ec31049c-c9de-4285-9e6d-5a8b30ce0dd0, infoPort=44117, infoSecurePort=0, ipcPort=35033, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245):Failed to transfer BP-1009609649-172.17.0.3-1730982564245:blk_1073741854_1037 to 127.0.0.1:44341 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:50,049 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:50,833 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:51,079 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:51,583 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3479fe30[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37967, datanodeUuid=ec31049c-c9de-4285-9e6d-5a8b30ce0dd0, infoPort=44117, infoSecurePort=0, ipcPort=35033, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245):Failed to transfer BP-1009609649-172.17.0.3-1730982564245:blk_1073741884_1067 to 127.0.0.1:44341 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:52,049 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:52,833 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:53,079 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:54,049 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:54,834 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:54,924 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-07T12:29:55,079 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:55,194 ERROR [FSHLog-0-hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData-prefix:87fb68174c52,41297,1730982564944 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:29:55,194 WARN [FSHLog-0-hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData-prefix:87fb68174c52,41297,1730982564944 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:55,194 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 87fb68174c52%2C41297%2C1730982564944:(num 1730982565099) roll requested 2024-11-07T12:29:55,195 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C41297%2C1730982564944.1730982595195 2024-11-07T12:29:55,199 WARN [Thread-1014 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44341 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:55,199 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1559346319_22 at /127.0.0.1:44312 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741885_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data4]'}, localName='127.0.0.1:37921', datanodeUuid='e50dee72-73bb-452a-a5d4-1fb4c3bf8057', xmitsInProgress=0}:Exception transferring block BP-1009609649-172.17.0.3-1730982564245:blk_1073741885_1068 to mirror 127.0.0.1:44341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:55,199 WARN [Thread-1014 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37921,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK], DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK]) is bad. 2024-11-07T12:29:55,199 WARN [Thread-1014 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741885_1068 2024-11-07T12:29:55,199 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1559346319_22 at /127.0.0.1:44312 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741885_1068] {}] datanode.BlockReceiver(316): Block 1073741885 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-07T12:29:55,199 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1559346319_22 at /127.0.0.1:44312 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741885_1068] {}] datanode.DataXceiver(331): 127.0.0.1:37921:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44312 dst: /127.0.0.1:37921 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:29:55,200 WARN [Thread-1014 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK] 2024-11-07T12:29:55,201 WARN [Thread-1014 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:55,201 WARN [Thread-1014 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK], DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK]) is bad. 2024-11-07T12:29:55,201 WARN [Thread-1014 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741886_1069 2024-11-07T12:29:55,202 WARN [Thread-1014 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK] 2024-11-07T12:29:55,206 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:55,206 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:55,206 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:55,206 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:55,206 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:29:55,206 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/WALs/87fb68174c52,41297,1730982564944/87fb68174c52%2C41297%2C1730982564944.1730982565099 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/WALs/87fb68174c52,41297,1730982564944/87fb68174c52%2C41297%2C1730982564944.1730982595195 2024-11-07T12:29:55,207 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:55,207 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:55,207 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/WALs/87fb68174c52,41297,1730982564944/87fb68174c52%2C41297%2C1730982564944.1730982565099 2024-11-07T12:29:55,207 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38549:38549),(127.0.0.1/127.0.0.1:44117:44117)] 2024-11-07T12:29:55,207 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/WALs/87fb68174c52,41297,1730982564944/87fb68174c52%2C41297%2C1730982564944.1730982565099 is not closed yet, will try archiving it next time 2024-11-07T12:29:55,207 WARN [IPC Server handler 2 on default port 39665 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/WALs/87fb68174c52,41297,1730982564944/87fb68174c52%2C41297%2C1730982564944.1730982565099 has not been closed. Lease recovery is in progress. RecoveryId = 1071 for block blk_1073741830_1006 2024-11-07T12:29:55,208 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/WALs/87fb68174c52,41297,1730982564944/87fb68174c52%2C41297%2C1730982564944.1730982565099 after 0ms 2024-11-07T12:29:56,050 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:56,834 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:29:58,050 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:58,834 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:29:59,209 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/WALs/87fb68174c52,41297,1730982564944/87fb68174c52%2C41297%2C1730982564944.1730982565099 after 4002ms 2024-11-07T12:30:00,050 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:00,835 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:02,051 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:02,757 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C45061%2C1730982564994.1730982602756 2024-11-07T12:30:02,759 WARN [Thread-1025 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1072 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:02,760 WARN [Thread-1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741888_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK], DatanodeInfoWithStorage[127.0.0.1:37921,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK]) is bad. 2024-11-07T12:30:02,760 WARN [Thread-1025 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741888_1072 2024-11-07T12:30:02,760 WARN [Thread-1025 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK] 2024-11-07T12:30:02,761 WARN [Thread-1025 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:02,762 WARN [Thread-1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741889_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK], DatanodeInfoWithStorage[127.0.0.1:37921,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]) is bad. 2024-11-07T12:30:02,762 WARN [Thread-1025 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741889_1073 2024-11-07T12:30:02,762 WARN [Thread-1025 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK] 2024-11-07T12:30:02,763 WARN [Thread-1025 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1074 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:02,763 WARN [Thread-1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741890_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK], DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK]) is bad. 
2024-11-07T12:30:02,763 WARN [Thread-1025 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741890_1074 2024-11-07T12:30:02,764 WARN [Thread-1025 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK] 2024-11-07T12:30:02,768 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:02,768 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:02,768 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:02,768 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:02,768 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:02,768 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982586818 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982602756 2024-11-07T12:30:02,769 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38549:38549),(127.0.0.1/127.0.0.1:44117:44117)] 2024-11-07T12:30:02,769 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 is not closed yet, will try archiving it next time 2024-11-07T12:30:02,769 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982586818 is not closed yet, will try archiving it next time 2024-11-07T12:30:02,769 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982584804 to hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/oldWALs/87fb68174c52%2C45061%2C1730982564994.1730982584804 2024-11-07T12:30:02,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741874_1057 (size=12911) 2024-11-07T12:30:02,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45061 {}] regionserver.HRegion(8855): Flush requested on a522e52eecdb10e9bf4c387c36fbbb6c 2024-11-07T12:30:02,774 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a522e52eecdb10e9bf4c387c36fbbb6c 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-07T12:30:02,779 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/65e7a96ed4b548379b13e056efcbc5e2 is 1080, key is row0013/info:/1730982602771/Put/seqid=0 2024-11-07T12:30:02,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741892_1076 (size=8190) 2024-11-07T12:30:02,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37921 is 
added to blk_1073741892_1076 (size=8190) 2024-11-07T12:30:02,835 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-07T12:30:02,835 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:02,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-07T12:30:02,992 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-07T12:30:02,992 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at 
org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:30:02,992 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:30:02,992 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:30:02,992 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-07T12:30:02,992 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-07T12:30:02,993 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1223048069, stopped=false 2024-11-07T12:30:02,993 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=87fb68174c52,41297,1730982564944 2024-11-07T12:30:02,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42371-0x1001a37dedd0002, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:30:02,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:30:02,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1001a37dedd0001, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:30:02,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42371-0x1001a37dedd0002, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:02,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:02,995 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:30:02,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1001a37dedd0001, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:02,995 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-07T12:30:02,995 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:30:02,995 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:30:02,995 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:30:02,995 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45061-0x1001a37dedd0001, quorum=127.0.0.1:62457, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:30:02,995 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42371-0x1001a37dedd0002, quorum=127.0.0.1:62457, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:30:02,996 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '87fb68174c52,45061,1730982564994' ***** 2024-11-07T12:30:02,996 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-07T12:30:02,996 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '87fb68174c52,42371,1730982565959' ***** 2024-11-07T12:30:02,996 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-07T12:30:02,996 INFO [RS:1;87fb68174c52:42371 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-07T12:30:02,996 INFO [RS:0;87fb68174c52:45061 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-07T12:30:02,996 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-07T12:30:02,996 INFO [RS:1;87fb68174c52:42371 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-07T12:30:02,996 INFO [RS:1;87fb68174c52:42371 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-07T12:30:02,996 INFO [RS:1;87fb68174c52:42371 {}] regionserver.HRegionServer(959): stopping server 87fb68174c52,42371,1730982565959 2024-11-07T12:30:02,997 INFO [RS:1;87fb68174c52:42371 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:30:02,997 INFO [RS:1;87fb68174c52:42371 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;87fb68174c52:42371. 
2024-11-07T12:30:02,997 DEBUG [RS:1;87fb68174c52:42371 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:30:02,997 DEBUG [RS:1;87fb68174c52:42371 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:30:02,997 INFO [RS:1;87fb68174c52:42371 {}] regionserver.HRegionServer(976): stopping server 87fb68174c52,42371,1730982565959; all regions closed. 2024-11-07T12:30:02,997 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:02,997 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:02,998 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:02,998 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:02,998 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:02,998 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:02,998 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:02,998 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 2024-11-07T12:30:02,999 WARN [IPC Server handler 3 on default port 39665 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 has not been closed. Lease recovery is in progress. RecoveryId = 1077 for block blk_1073741837_1016 2024-11-07T12:30:02,999 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 after 1ms 2024-11-07T12:30:03,048 INFO [regionserver/87fb68174c52:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-07T12:30:03,048 INFO [regionserver/87fb68174c52:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-07T12:30:03,171 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 is not closed yet, will try archiving it next time 2024-11-07T12:30:03,188 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/65e7a96ed4b548379b13e056efcbc5e2 2024-11-07T12:30:03,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/65e7a96ed4b548379b13e056efcbc5e2 as hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/65e7a96ed4b548379b13e056efcbc5e2 2024-11-07T12:30:03,201 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/65e7a96ed4b548379b13e056efcbc5e2, entries=3, sequenceid=66, filesize=8.0 K 2024-11-07T12:30:03,202 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for a522e52eecdb10e9bf4c387c36fbbb6c in 428ms, sequenceid=66, compaction requested=false 2024-11-07T12:30:03,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
a522e52eecdb10e9bf4c387c36fbbb6c: 2024-11-07T12:30:03,202 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-07T12:30:03,202 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:30:03,202 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/b1d26e45cfa64fbf80ff32c7231e1a54 because midkey is the same as first or last row 2024-11-07T12:30:03,202 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-07T12:30:03,202 INFO [RS:0;87fb68174c52:45061 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-07T12:30:03,202 INFO [RS:0;87fb68174c52:45061 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-07T12:30:03,202 INFO [RS:0;87fb68174c52:45061 {}] regionserver.HRegionServer(3091): Received CLOSE for a522e52eecdb10e9bf4c387c36fbbb6c 2024-11-07T12:30:03,203 INFO [RS:0;87fb68174c52:45061 {}] regionserver.HRegionServer(959): stopping server 87fb68174c52,45061,1730982564994 2024-11-07T12:30:03,203 INFO [RS:0;87fb68174c52:45061 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:30:03,203 INFO [RS:0;87fb68174c52:45061 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;87fb68174c52:45061. 2024-11-07T12:30:03,203 DEBUG [RS:0;87fb68174c52:45061 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:30:03,203 DEBUG [RS:0;87fb68174c52:45061 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:30:03,203 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a522e52eecdb10e9bf4c387c36fbbb6c, disabling compactions & flushes 2024-11-07T12:30:03,203 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] 
regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c. 2024-11-07T12:30:03,203 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c. 2024-11-07T12:30:03,203 INFO [RS:0;87fb68174c52:45061 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-07T12:30:03,203 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c. after waiting 0 ms 2024-11-07T12:30:03,203 INFO [RS:0;87fb68174c52:45061 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-07T12:30:03,203 INFO [RS:0;87fb68174c52:45061 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-07T12:30:03,203 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c. 2024-11-07T12:30:03,203 INFO [RS:0;87fb68174c52:45061 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-07T12:30:03,203 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing a522e52eecdb10e9bf4c387c36fbbb6c 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-07T12:30:03,203 INFO [RS:0;87fb68174c52:45061 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-07T12:30:03,204 DEBUG [RS:0;87fb68174c52:45061 {}] regionserver.HRegionServer(1325): Online Regions={a522e52eecdb10e9bf4c387c36fbbb6c=TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c., 1588230740=hbase:meta,,1.1588230740} 2024-11-07T12:30:03,204 DEBUG [RS:0;87fb68174c52:45061 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, a522e52eecdb10e9bf4c387c36fbbb6c 2024-11-07T12:30:03,204 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:30:03,204 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:30:03,204 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:30:03,204 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:30:03,204 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:30:03,204 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-07T12:30:03,204 ERROR [FSHLog-0-hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c-prefix:87fb68174c52,45061,1730982564994.meta {}] 
wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:03,204 WARN [FSHLog-0-hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c-prefix:87fb68174c52,45061,1730982564994.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:03,205 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87fb68174c52%2C45061%2C1730982564994.meta:.meta(num 1730982565817) roll requested 2024-11-07T12:30:03,205 INFO [regionserver/87fb68174c52:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C45061%2C1730982564994.meta.1730982603205.meta 2024-11-07T12:30:03,209 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/868ae9ec71af48839c64f24ce21b72ad is 1080, key is row0015/info:/1730982602775/Put/seqid=0 2024-11-07T12:30:03,210 WARN [Thread-1040 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:03,210 WARN [Thread-1040 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741894_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK], DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK]) is bad. 2024-11-07T12:30:03,210 WARN [Thread-1040 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741894_1079 2024-11-07T12:30:03,210 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:03,211 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:03,211 WARN [Thread-1040 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK] 2024-11-07T12:30:03,211 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:03,211 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:03,211 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:03,211 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982603205.meta 2024-11-07T12:30:03,211 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:03,212 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:30:03,212 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta 2024-11-07T12:30:03,212 WARN [IPC Server handler 1 on default port 39665 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta has not been closed. Lease recovery is in progress. RecoveryId = 1081 for block blk_1073741834_1010 2024-11-07T12:30:03,212 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38549:38549),(127.0.0.1/127.0.0.1:44117:44117)] 2024-11-07T12:30:03,212 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta is not closed yet, will try archiving it next time 2024-11-07T12:30:03,212 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta after 0ms 2024-11-07T12:30:03,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37921 is added to blk_1073741895_1080 (size=14660) 2024-11-07T12:30:03,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741895_1080 (size=14660) 2024-11-07T12:30:03,217 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/868ae9ec71af48839c64f24ce21b72ad 2024-11-07T12:30:03,223 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/.tmp/info/868ae9ec71af48839c64f24ce21b72ad as hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/868ae9ec71af48839c64f24ce21b72ad 2024-11-07T12:30:03,228 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/868ae9ec71af48839c64f24ce21b72ad, entries=9, sequenceid=78, filesize=14.3 K 2024-11-07T12:30:03,229 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, 
currentSize=0 B/0 for a522e52eecdb10e9bf4c387c36fbbb6c in 26ms, sequenceid=78, compaction requested=true 2024-11-07T12:30:03,233 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/ab341a45c7c94e09b42cfb56965bb998, hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/2199b9d7937e4bed8777da169958fcb4, hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/61f3fe28946e478a80516f2c5b2d976e, hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/f78819e18bcd4076b306a86416f770c4, hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/139453e68d094ba783c06ebafbe9c171, hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/d86f2ec979584973a2fe06c2205867c2] to archive 2024-11-07T12:30:03,234 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T12:30:03,236 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/ab341a45c7c94e09b42cfb56965bb998 to hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/ab341a45c7c94e09b42cfb56965bb998 2024-11-07T12:30:03,236 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/hbase/meta/1588230740/.tmp/info/1365a3152788408388816fa304935ce3 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c./info:regioninfo/1730982566442/Put/seqid=0 2024-11-07T12:30:03,237 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/2199b9d7937e4bed8777da169958fcb4 to hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/2199b9d7937e4bed8777da169958fcb4 2024-11-07T12:30:03,239 WARN [Thread-1051 {}] hdfs.DataStreamer(1959): Exception 
in createBlockOutputStream blk_1073741896_1082 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44341 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:03,239 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:49414 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741896_1082] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data4]'}, localName='127.0.0.1:37921', datanodeUuid='e50dee72-73bb-452a-a5d4-1fb4c3bf8057', xmitsInProgress=0}:Exception transferring block BP-1009609649-172.17.0.3-1730982564245:blk_1073741896_1082 to mirror 127.0.0.1:44341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:30:03,239 WARN [Thread-1051 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741896_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37921,DS-c263f208-3cd5-400f-be78-6d95bfd6cf86,DISK], DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK]) is bad. 2024-11-07T12:30:03,239 WARN [Thread-1051 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741896_1082 2024-11-07T12:30:03,239 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:49414 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741896_1082] {}] datanode.BlockReceiver(316): Block 1073741896 has not released the reserved bytes. 
Releasing 134217728 bytes as part of close. 2024-11-07T12:30:03,239 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1846368508_22 at /127.0.0.1:49414 [Receiving block BP-1009609649-172.17.0.3-1730982564245:blk_1073741896_1082] {}] datanode.DataXceiver(331): 127.0.0.1:37921:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49414 dst: /127.0.0.1:37921 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:30:03,239 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/61f3fe28946e478a80516f2c5b2d976e to hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/61f3fe28946e478a80516f2c5b2d976e 2024-11-07T12:30:03,239 WARN [Thread-1051 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK] 2024-11-07T12:30:03,241 WARN [Thread-1051 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:30:03,241 WARN [Thread-1051 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741897_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK], DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]) is bad. 2024-11-07T12:30:03,241 WARN [Thread-1051 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741897_1083 2024-11-07T12:30:03,241 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/f78819e18bcd4076b306a86416f770c4 to hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/f78819e18bcd4076b306a86416f770c4 2024-11-07T12:30:03,241 WARN [Thread-1051 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK] 2024-11-07T12:30:03,243 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/139453e68d094ba783c06ebafbe9c171 to hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/139453e68d094ba783c06ebafbe9c171 2024-11-07T12:30:03,244 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/d86f2ec979584973a2fe06c2205867c2 to hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/info/d86f2ec979584973a2fe06c2205867c2 2024-11-07T12:30:03,245 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=87fb68174c52:41297 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-07T12:30:03,245 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [ab341a45c7c94e09b42cfb56965bb998=10347, 2199b9d7937e4bed8777da169958fcb4=12506, 61f3fe28946e478a80516f2c5b2d976e=17994, f78819e18bcd4076b306a86416f770c4=6027, 139453e68d094ba783c06ebafbe9c171=6027, d86f2ec979584973a2fe06c2205867c2=6027] 2024-11-07T12:30:03,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37921 is added to blk_1073741898_1084 (size=7089) 2024-11-07T12:30:03,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741898_1084 (size=7089) 2024-11-07T12:30:03,253 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/hbase/meta/1588230740/.tmp/info/1365a3152788408388816fa304935ce3 2024-11-07T12:30:03,253 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a522e52eecdb10e9bf4c387c36fbbb6c/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1 2024-11-07T12:30:03,254 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c. 2024-11-07T12:30:03,254 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a522e52eecdb10e9bf4c387c36fbbb6c: Waiting for close lock at 1730982603203Running coprocessor pre-close hooks at 1730982603203Disabling compacts and flushes for region at 1730982603203Disabling writes for close at 1730982603203Obtaining lock to block concurrent updates at 1730982603203Preparing flush snapshotting stores in a522e52eecdb10e9bf4c387c36fbbb6c at 1730982603203Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c., syncing WAL and waiting on mvcc, flushsize=dataSize=9683, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1730982603204 (+1 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c. 
at 1730982603204Flushing a522e52eecdb10e9bf4c387c36fbbb6c/info: creating writer at 1730982603204Flushing a522e52eecdb10e9bf4c387c36fbbb6c/info: appending metadata at 1730982603208 (+4 ms)Flushing a522e52eecdb10e9bf4c387c36fbbb6c/info: closing flushed file at 1730982603208Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@d923edb: reopening flushed file at 1730982603222 (+14 ms)Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for a522e52eecdb10e9bf4c387c36fbbb6c in 26ms, sequenceid=78, compaction requested=true at 1730982603229 (+7 ms)Writing region close event to WAL at 1730982603249 (+20 ms)Running coprocessor post-close hooks at 1730982603254 (+5 ms)Closed at 1730982603254 2024-11-07T12:30:03,254 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1730982566080.a522e52eecdb10e9bf4c387c36fbbb6c. 2024-11-07T12:30:03,267 INFO [regionserver/87fb68174c52:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-07T12:30:03,267 INFO [regionserver/87fb68174c52:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-07T12:30:03,269 INFO [regionserver/87fb68174c52:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:30:03,273 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/hbase/meta/1588230740/.tmp/ns/7083fe16639e4dc7894b379795ec031c is 43, key is default/ns:d/1730982565868/Put/seqid=0 2024-11-07T12:30:03,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741899_1085 (size=5153) 2024-11-07T12:30:03,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37921 is added to blk_1073741899_1085 (size=5153) 2024-11-07T12:30:03,278 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/hbase/meta/1588230740/.tmp/ns/7083fe16639e4dc7894b379795ec031c 2024-11-07T12:30:03,306 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/hbase/meta/1588230740/.tmp/table/cb8a6b9624504643bd515b41b937b1a8 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1730982566452/Put/seqid=0 2024-11-07T12:30:03,308 WARN [Thread-1065 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1086 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:03,308 WARN [Thread-1065 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741900_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK], DatanodeInfoWithStorage[127.0.0.1:37967,DS-aa8e78f9-7c21-42ef-acbf-6c8800bd6962,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK]) is bad. 2024-11-07T12:30:03,308 WARN [Thread-1065 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741900_1086 2024-11-07T12:30:03,309 WARN [Thread-1065 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44341,DS-9ed919b2-e012-49e7-9e6a-d7fba9ce08cd,DISK] 2024-11-07T12:30:03,310 WARN [Thread-1065 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:03,310 WARN [Thread-1065 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1009609649-172.17.0.3-1730982564245:blk_1073741901_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK], DatanodeInfoWithStorage[127.0.0.1:35157,DS-b201ff21-27e5-4c46-a980-e9a151a4b43d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK]) is bad. 
2024-11-07T12:30:03,310 WARN [Thread-1065 {}] hdfs.DataStreamer(1850): Abandoning BP-1009609649-172.17.0.3-1730982564245:blk_1073741901_1087 2024-11-07T12:30:03,311 WARN [Thread-1065 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36671,DS-ab03a237-adcb-40c5-bc6d-9768ebea06ee,DISK] 2024-11-07T12:30:03,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37921 is added to blk_1073741902_1088 (size=5424) 2024-11-07T12:30:03,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741902_1088 (size=5424) 2024-11-07T12:30:03,316 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/hbase/meta/1588230740/.tmp/table/cb8a6b9624504643bd515b41b937b1a8 2024-11-07T12:30:03,322 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/hbase/meta/1588230740/.tmp/info/1365a3152788408388816fa304935ce3 as hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/hbase/meta/1588230740/info/1365a3152788408388816fa304935ce3 2024-11-07T12:30:03,328 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/hbase/meta/1588230740/info/1365a3152788408388816fa304935ce3, entries=10, sequenceid=11, filesize=6.9 K 2024-11-07T12:30:03,329 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/hbase/meta/1588230740/.tmp/ns/7083fe16639e4dc7894b379795ec031c as hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/hbase/meta/1588230740/ns/7083fe16639e4dc7894b379795ec031c 2024-11-07T12:30:03,335 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/hbase/meta/1588230740/ns/7083fe16639e4dc7894b379795ec031c, entries=2, sequenceid=11, filesize=5.0 K 2024-11-07T12:30:03,336 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/hbase/meta/1588230740/.tmp/table/cb8a6b9624504643bd515b41b937b1a8 as hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/hbase/meta/1588230740/table/cb8a6b9624504643bd515b41b937b1a8 2024-11-07T12:30:03,341 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/hbase/meta/1588230740/table/cb8a6b9624504643bd515b41b937b1a8, entries=2, sequenceid=11, filesize=5.3 K 2024-11-07T12:30:03,342 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 138ms, sequenceid=11, compaction requested=false 2024-11-07T12:30:03,347 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-07T12:30:03,348 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T12:30:03,348 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:30:03,348 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730982603204Running coprocessor pre-close hooks at 1730982603204Disabling compacts and flushes for region at 1730982603204Disabling writes for close at 1730982603204Obtaining lock to block concurrent updates at 1730982603204Preparing flush snapshotting stores in 1588230740 at 1730982603204Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1730982603204Flushing stores of hbase:meta,,1.1588230740 at 1730982603213 (+9 ms)Flushing 1588230740/info: creating writer at 1730982603213Flushing 1588230740/info: appending metadata at 1730982603235 (+22 ms)Flushing 1588230740/info: closing flushed file at 1730982603235Flushing 1588230740/ns: creating writer at 1730982603259 (+24 ms)Flushing 1588230740/ns: appending metadata at 1730982603272 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1730982603272Flushing 1588230740/table: creating writer at 1730982603285 (+13 ms)Flushing 1588230740/table: appending metadata at 1730982603306 (+21 ms)Flushing 1588230740/table: closing flushed file at 1730982603306Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e57d01f: reopening flushed file at 1730982603322 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c2326f3: reopening flushed file at 1730982603328 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6fb15631: reopening flushed file at 1730982603335 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 138ms, sequenceid=11, compaction requested=false at 1730982603342 (+7 ms)Writing region close event to WAL at 1730982603343 (+1 ms)Running coprocessor post-close hooks at 1730982603348 (+5 ms)Closed at 1730982603348 2024-11-07T12:30:03,348 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-07T12:30:03,404 INFO [RS:0;87fb68174c52:45061 {}] regionserver.HRegionServer(976): stopping server 87fb68174c52,45061,1730982564994; all regions closed. 
2024-11-07T12:30:03,405 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:30:03,405 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:30:03,405 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:30:03,405 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:30:03,405 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:30:03,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741893_1078 (size=825)
2024-11-07T12:30:03,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37921 is added to blk_1073741893_1078 (size=825)
2024-11-07T12:30:03,584 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3479fe30[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37967, datanodeUuid=ec31049c-c9de-4285-9e6d-5a8b30ce0dd0, infoPort=44117, infoSecurePort=0, ipcPort=35033, storageInfo=lv=-57;cid=testClusterID;nsid=1976247185;c=1730982564245):Failed to transfer BP-1009609649-172.17.0.3-1730982564245:blk_1073741874_1057 to 127.0.0.1:35157 got java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T12:30:04,049 INFO [regionserver/87fb68174c52:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-11-07T12:30:05,885 INFO [master/87fb68174c52:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-07T12:30:05,885 INFO [master/87fb68174c52:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-07T12:30:06,148 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7be298e1 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1009609649-172.17.0.3-1730982564245:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:36671,null,null])
java.net.ConnectException: Call From 87fb68174c52/172.17.0.3 to localhost:40541 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
    12 more
2024-11-07T12:30:06,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37921 is added to blk_1073741830_1071 (size=27306)
2024-11-07T12:30:06,517 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath
2024-11-07T12:30:06,518 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-07T12:30:06,519 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-07T12:30:07,000 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 after 4002ms
2024-11-07T12:30:07,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741832_1008 (size=32)
2024-11-07T12:30:07,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741836_1012 (size=76)
2024-11-07T12:30:07,213 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta after 4001ms
2024-11-07T12:30:07,998 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds"
2024-11-07T12:30:08,001 DEBUG [RS:1;87fb68174c52:42371 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/oldWALs
2024-11-07T12:30:08,001 INFO [RS:1;87fb68174c52:42371 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87fb68174c52%2C42371%2C1730982565959:(num 1730982566180)
2024-11-07T12:30:08,001 DEBUG [RS:1;87fb68174c52:42371 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-07T12:30:08,001 INFO [RS:1;87fb68174c52:42371 {}] regionserver.LeaseManager(133): Closed leases
2024-11-07T12:30:08,001 INFO [RS:1;87fb68174c52:42371 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-07T12:30:08,001 INFO [RS:1;87fb68174c52:42371 {}] hbase.ChoreService(370): Chore service for: regionserver/87fb68174c52:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-11-07T12:30:08,001 INFO [RS:1;87fb68174c52:42371 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-07T12:30:08,001 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-07T12:30:08,001 INFO [RS:1;87fb68174c52:42371 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-07T12:30:08,001 INFO [RS:1;87fb68174c52:42371 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-07T12:30:08,001 INFO [RS:1;87fb68174c52:42371 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-07T12:30:08,001 INFO [RS:1;87fb68174c52:42371 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42371
2024-11-07T12:30:08,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42371-0x1001a37dedd0002, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/87fb68174c52,42371,1730982565959
2024-11-07T12:30:08,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-07T12:30:08,003 INFO [RS:1;87fb68174c52:42371 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-07T12:30:08,004 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [87fb68174c52,42371,1730982565959]
2024-11-07T12:30:08,006 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/87fb68174c52,42371,1730982565959 already deleted, retry=false
2024-11-07T12:30:08,006 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 87fb68174c52,42371,1730982565959 expired; onlineServers=1
2024-11-07T12:30:08,006 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-07T12:30:08,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42371-0x1001a37dedd0002, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T12:30:08,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42371-0x1001a37dedd0002, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T12:30:08,105 INFO [RS:1;87fb68174c52:42371 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-07T12:30:08,106 INFO [RS:1;87fb68174c52:42371 {}] regionserver.HRegionServer(1031): Exiting; stopping=87fb68174c52,42371,1730982565959; zookeeper connection closed.
2024-11-07T12:30:08,106 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@41175bbe {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@41175bbe
2024-11-07T12:30:08,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741828_1004 (size=1189)
2024-11-07T12:30:08,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741826_1002 (size=42)
2024-11-07T12:30:08,255 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-07T12:30:08,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-07T12:30:08,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:08,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:08,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:08,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:08,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:08,276 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:08,406 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-07T12:30:08,409 DEBUG [RS:0;87fb68174c52:45061 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/oldWALs 2024-11-07T12:30:08,409 INFO [RS:0;87fb68174c52:45061 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87fb68174c52%2C45061%2C1730982564994.meta:.meta(num 1730982603205) 2024-11-07T12:30:08,409 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:08,410 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:08,410 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:08,410 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:08,410 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:08,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741891_1075 (size=14682) 2024-11-07T12:30:08,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37921 is added to blk_1073741891_1075 (size=14682) 2024-11-07T12:30:08,778 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-07T12:30:08,792 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:08,793 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:08,793 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:08,793 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:08,793 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:08,794 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:08,797 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:08,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:09,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:09,213 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.nio.channels.ClosedChannelException: null at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?] 2024-11-07T12:30:09,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:10,008 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:10,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:30:10,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741835_1011 (size=393) 2024-11-07T12:30:10,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:11,008 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:11,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:30:11,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:30:11,213 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.nio.channels.ClosedChannelException: null at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?] 2024-11-07T12:30:11,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:12,009 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:12,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:13,009 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:13,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:30:13,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741830_1071 (size=27306) 2024-11-07T12:30:13,214 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.FSHLog(580): java.nio.channels.ClosedChannelException: null at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?] 2024-11-07T12:30:13,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:13,410 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-07T12:30:13,414 DEBUG [RS:0;87fb68174c52:45061 {}] wal.AbstractFSWAL(1256): Moved 3 WAL file(s) to /user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/oldWALs 2024-11-07T12:30:13,414 INFO [RS:0;87fb68174c52:45061 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87fb68174c52%2C45061%2C1730982564994:(num 1730982602756) 2024-11-07T12:30:13,414 DEBUG [RS:0;87fb68174c52:45061 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:30:13,414 INFO [RS:0;87fb68174c52:45061 {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:30:13,414 INFO [RS:0;87fb68174c52:45061 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:30:13,415 INFO [RS:0;87fb68174c52:45061 {}] hbase.ChoreService(370): Chore service for: regionserver/87fb68174c52:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-07T12:30:13,415 INFO [RS:0;87fb68174c52:45061 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:30:13,415 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-07T12:30:13,415 INFO [RS:0;87fb68174c52:45061 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:45061 2024-11-07T12:30:13,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1001a37dedd0001, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/87fb68174c52,45061,1730982564994 2024-11-07T12:30:13,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:30:13,417 INFO [RS:0;87fb68174c52:45061 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:30:13,419 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [87fb68174c52,45061,1730982564994] 2024-11-07T12:30:13,420 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/87fb68174c52,45061,1730982564994 already deleted, retry=false 2024-11-07T12:30:13,420 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 87fb68174c52,45061,1730982564994 expired; onlineServers=0 2024-11-07T12:30:13,420 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '87fb68174c52,41297,1730982564944' ***** 2024-11-07T12:30:13,420 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-07T12:30:13,420 INFO [M:0;87fb68174c52:41297 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:30:13,420 INFO [M:0;87fb68174c52:41297 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:30:13,420 DEBUG [M:0;87fb68174c52:41297 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-07T12:30:13,420 DEBUG [M:0;87fb68174c52:41297 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-07T12:30:13,421 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
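The ZKWatcher and RegionServerTracker entries above show how the master notices the stopped region server: the server's ephemeral znode under /hbase/rs is deleted when its ZooKeeper session ends, and the watcher receives a NodeDeleted event. The sketch below illustrates that pattern with plain ZooKeeper APIs; it is not HBase's ZKWatcher or RegionServerTracker code, and the znode name, session timeout, and quorum handling are assumptions (the parent znode is expected to already exist, as it does on a running cluster):

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public final class EphemeralRsNodeExample {
      public static void main(String[] args) throws Exception {
        String quorum = "127.0.0.1:62457";           // quorum address taken from the log
        String rsZnode = "/hbase/rs/example-server"; // illustrative znode name

        // "Master" session: reacts when the region server's znode disappears.
        ZooKeeper master = new ZooKeeper(quorum, 30_000, event -> {
          if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
            System.out.println("RegionServer znode deleted: " + event.getPath());
          }
        });

        // "RegionServer" session: owns an ephemeral znode, like the ones under /hbase/rs.
        ZooKeeper rs = new ZooKeeper(quorum, 30_000, event -> { });
        rs.create(rsZnode, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

        master.exists(rsZnode, true); // one-shot watch on the znode
        rs.close();                   // closing the owning session deletes the ephemeral node
        Thread.sleep(1_000);          // allow the NodeDeleted event to be delivered
        master.close();
      }
    }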
2024-11-07T12:30:13,421 DEBUG [master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.large.0-1730982565201 {}] cleaner.HFileCleaner(306): Exit Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.large.0-1730982565201,5,FailOnTimeoutGroup] 2024-11-07T12:30:13,421 DEBUG [master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.small.0-1730982565203 {}] cleaner.HFileCleaner(306): Exit Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.small.0-1730982565203,5,FailOnTimeoutGroup] 2024-11-07T12:30:13,421 INFO [M:0;87fb68174c52:41297 {}] hbase.ChoreService(370): Chore service for: master/87fb68174c52:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-07T12:30:13,421 INFO [M:0;87fb68174c52:41297 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:30:13,421 DEBUG [M:0;87fb68174c52:41297 {}] master.HMaster(1795): Stopping service threads 2024-11-07T12:30:13,421 INFO [M:0;87fb68174c52:41297 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-07T12:30:13,421 INFO [M:0;87fb68174c52:41297 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:30:13,421 INFO [M:0;87fb68174c52:41297 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-07T12:30:13,421 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-07T12:30:13,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-07T12:30:13,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:13,422 DEBUG [M:0;87fb68174c52:41297 {}] zookeeper.ZKUtil(347): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-07T12:30:13,422 WARN [M:0;87fb68174c52:41297 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-07T12:30:13,423 INFO [M:0;87fb68174c52:41297 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/.lastflushedseqids 2024-11-07T12:30:13,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741903_1089 (size=130) 2024-11-07T12:30:13,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37921 is added to blk_1073741903_1089 (size=130) 2024-11-07T12:30:13,429 INFO [M:0;87fb68174c52:41297 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-07T12:30:13,429 INFO [M:0;87fb68174c52:41297 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-07T12:30:13,429 DEBUG [M:0;87fb68174c52:41297 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:30:13,429 INFO [M:0;87fb68174c52:41297 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:30:13,429 DEBUG [M:0;87fb68174c52:41297 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:30:13,429 DEBUG [M:0;87fb68174c52:41297 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:30:13,429 DEBUG [M:0;87fb68174c52:41297 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:30:13,430 INFO [M:0;87fb68174c52:41297 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-07T12:30:13,445 DEBUG [M:0;87fb68174c52:41297 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1e3cd6ffca6c47b7b5bd8aa66e9247b6 is 82, key is hbase:meta,,1/info:regioninfo/1730982565851/Put/seqid=0 2024-11-07T12:30:13,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741904_1090 (size=5672) 2024-11-07T12:30:13,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37921 is added to blk_1073741904_1090 (size=5672) 2024-11-07T12:30:13,452 INFO [M:0;87fb68174c52:41297 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1e3cd6ffca6c47b7b5bd8aa66e9247b6 2024-11-07T12:30:13,473 DEBUG [M:0;87fb68174c52:41297 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/51f0a47c03b9444280f421aa8ed07782 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1730982566457/Put/seqid=0 2024-11-07T12:30:13,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741905_1091 (size=6255) 2024-11-07T12:30:13,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37921 is added to blk_1073741905_1091 (size=6255) 2024-11-07T12:30:13,479 INFO [M:0;87fb68174c52:41297 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/51f0a47c03b9444280f421aa8ed07782 2024-11-07T12:30:13,484 INFO [M:0;87fb68174c52:41297 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 51f0a47c03b9444280f421aa8ed07782 2024-11-07T12:30:13,498 DEBUG [M:0;87fb68174c52:41297 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/44bd01d82fc04bcf8480a87bfbe3e5e9 is 69, key is 87fb68174c52,42371,1730982565959/rs:state/1730982566031/Put/seqid=0 2024-11-07T12:30:13,503 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741906_1092 (size=5224) 2024-11-07T12:30:13,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37921 is added to blk_1073741906_1092 (size=5224) 2024-11-07T12:30:13,503 INFO [M:0;87fb68174c52:41297 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/44bd01d82fc04bcf8480a87bfbe3e5e9 2024-11-07T12:30:13,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1001a37dedd0001, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:30:13,519 INFO [RS:0;87fb68174c52:45061 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:30:13,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1001a37dedd0001, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:30:13,519 INFO [RS:0;87fb68174c52:45061 {}] regionserver.HRegionServer(1031): Exiting; stopping=87fb68174c52,45061,1730982564994; zookeeper connection closed. 2024-11-07T12:30:13,519 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@717fb4e6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@717fb4e6 2024-11-07T12:30:13,519 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-07T12:30:13,522 DEBUG [M:0;87fb68174c52:41297 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4c23b8abe96241ce95529186a2d668c5 is 52, key is load_balancer_on/state:d/1730982565933/Put/seqid=0 2024-11-07T12:30:13,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37921 is added to blk_1073741907_1093 (size=5056) 2024-11-07T12:30:13,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741907_1093 (size=5056) 2024-11-07T12:30:13,527 INFO [M:0;87fb68174c52:41297 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4c23b8abe96241ce95529186a2d668c5 2024-11-07T12:30:13,533 DEBUG [M:0;87fb68174c52:41297 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1e3cd6ffca6c47b7b5bd8aa66e9247b6 as hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1e3cd6ffca6c47b7b5bd8aa66e9247b6 2024-11-07T12:30:13,537 INFO [M:0;87fb68174c52:41297 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1e3cd6ffca6c47b7b5bd8aa66e9247b6, entries=8, sequenceid=60, filesize=5.5 K 2024-11-07T12:30:13,538 DEBUG [M:0;87fb68174c52:41297 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/51f0a47c03b9444280f421aa8ed07782 as hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/51f0a47c03b9444280f421aa8ed07782 2024-11-07T12:30:13,542 INFO [M:0;87fb68174c52:41297 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 51f0a47c03b9444280f421aa8ed07782 2024-11-07T12:30:13,543 INFO [M:0;87fb68174c52:41297 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/51f0a47c03b9444280f421aa8ed07782, entries=6, sequenceid=60, filesize=6.1 K 2024-11-07T12:30:13,543 DEBUG [M:0;87fb68174c52:41297 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/44bd01d82fc04bcf8480a87bfbe3e5e9 as hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/44bd01d82fc04bcf8480a87bfbe3e5e9 2024-11-07T12:30:13,548 INFO [M:0;87fb68174c52:41297 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/44bd01d82fc04bcf8480a87bfbe3e5e9, entries=2, sequenceid=60, filesize=5.1 K 2024-11-07T12:30:13,549 DEBUG [M:0;87fb68174c52:41297 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4c23b8abe96241ce95529186a2d668c5 as hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4c23b8abe96241ce95529186a2d668c5 2024-11-07T12:30:13,554 INFO [M:0;87fb68174c52:41297 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4c23b8abe96241ce95529186a2d668c5, entries=1, sequenceid=60, filesize=4.9 K 2024-11-07T12:30:13,555 INFO [M:0;87fb68174c52:41297 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=60, compaction requested=false 2024-11-07T12:30:13,556 INFO [M:0;87fb68174c52:41297 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
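The "Committing .../.tmp/... as ..." DEBUG lines above show the shape of a store-file flush: each new file is written under the region's .tmp directory and then moved into the column-family directory once it is complete. The snippet below is a simplified illustration of that write-then-rename commit using the Hadoop FileSystem API; it is not HBase's HRegionFileSystem code, and the paths and file contents are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class TmpThenRenameCommit {
      public static void commitFlushedFile(Configuration conf) throws Exception {
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/demo-region/.tmp/info/flushed-file"); // placeholder paths
        Path dst = new Path("/demo-region/info/flushed-file");
        try (var out = fs.create(tmp)) {
          out.writeUTF("hfile bytes would go here"); // stand-in for the real HFile writer
        }
        fs.mkdirs(dst.getParent());
        // The rename publishes the finished file; readers never see a half-written
        // store file in the column-family directory.
        if (!fs.rename(tmp, dst)) {
          throw new java.io.IOException("failed to commit " + tmp + " to " + dst);
        }
      }
    }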
2024-11-07T12:30:13,556 DEBUG [M:0;87fb68174c52:41297 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730982613429Disabling compacts and flushes for region at 1730982613429Disabling writes for close at 1730982613429Obtaining lock to block concurrent updates at 1730982613430 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1730982613430Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1730982613430Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1730982613431 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1730982613431Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1730982613445 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1730982613445Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1730982613458 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1730982613472 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1730982613472Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1730982613484 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1730982613498 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1730982613498Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1730982613508 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1730982613522 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1730982613522Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37b6b5a5: reopening flushed file at 1730982613532 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@55894841: reopening flushed file at 1730982613537 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@30ad8246: reopening flushed file at 1730982613543 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ec4a188: reopening flushed file at 1730982613548 (+5 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=60, compaction requested=false at 1730982613555 (+7 ms)Writing region close event to WAL at 1730982613556 (+1 ms)Closed at 1730982613556 2024-11-07T12:30:13,556 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:13,557 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:13,557 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:13,557 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:13,557 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:13,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37967 is added to blk_1073741887_1070 (size=1045) 2024-11-07T12:30:13,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37921 is added to blk_1073741887_1070 (size=1045) 2024-11-07T12:30:13,559 INFO [M:0;87fb68174c52:41297 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-07T12:30:13,559 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-07T12:30:13,559 INFO [M:0;87fb68174c52:41297 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41297 2024-11-07T12:30:13,560 INFO [M:0;87fb68174c52:41297 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:30:13,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:30:13,662 INFO [M:0;87fb68174c52:41297 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:30:13,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41297-0x1001a37dedd0000, quorum=127.0.0.1:62457, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:30:13,664 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@e407f59{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:30:13,664 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@413b5d87{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:30:13,664 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:30:13,665 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2aaad1eb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:30:13,665 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39eb7ad6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/hadoop.log.dir/,STOPPED} 2024-11-07T12:30:13,666 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4a53e685 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1009609649-172.17.0.3-1730982564245:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:36671,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:40541 , LocalHost:localPort 87fb68174c52/172.17.0.3:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-07T12:30:13,666 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
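The block-recovery failure above gives up with retry policy RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS), and the root cause is the fixed sleep being interrupted during shutdown ("sleep interrupted"), which surfaces as the InterruptedIOException. As a hedged illustration of that policy shape (not the datanode's actual IPC code path), Hadoop's retry utilities can wrap an interface with the same policy; the FlakyService interface here is made up, while RetryPolicies and RetryProxy are real Hadoop APIs:

    import java.io.IOException;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;
    import org.apache.hadoop.io.retry.RetryProxy;

    public final class FixedSleepRetryExample {
      public interface FlakyService {
        String ping() throws IOException;
      }

      public static FlakyService withRetries(FlakyService raw) {
        // Same shape as the policy in the warning: up to 10 retries, 1 second apart.
        RetryPolicy policy =
            RetryPolicies.retryUpToMaximumCountWithFixedSleep(10, 1, TimeUnit.SECONDS);
        return (FlakyService) RetryProxy.create(FlakyService.class, raw, policy);
      }
    }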
2024-11-07T12:30:13,667 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:30:13,667 WARN [BP-1009609649-172.17.0.3-1730982564245 heartbeating to localhost/127.0.0.1:39665 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:30:13,667 WARN [BP-1009609649-172.17.0.3-1730982564245 heartbeating to localhost/127.0.0.1:39665 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1009609649-172.17.0.3-1730982564245 (Datanode Uuid e50dee72-73bb-452a-a5d4-1fb4c3bf8057) service to localhost/127.0.0.1:39665 2024-11-07T12:30:13,667 ERROR [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4a53e685 {}] datanode.DataNode(1743): Cannot find BPOfferService for reporting block received for bpid=BP-1009609649-172.17.0.3-1730982564245 2024-11-07T12:30:13,667 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data3/current/BP-1009609649-172.17.0.3-1730982564245 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:30:13,668 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data4/current/BP-1009609649-172.17.0.3-1730982564245 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:30:13,668 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:30:13,670 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5b5be5aa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:30:13,670 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7f7383da{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:30:13,670 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:30:13,670 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@167a7fde{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:30:13,670 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@519de6b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/hadoop.log.dir/,STOPPED} 2024-11-07T12:30:13,672 WARN [BP-1009609649-172.17.0.3-1730982564245 heartbeating to localhost/127.0.0.1:39665 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:30:13,672 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
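Once the minicluster finishes shutting down (the "Minicluster is down" entry just below), HBase's ResourceChecker compares the thread count before and after the test, 78 versus 155 here, and dumps a stack for each "Potentially hanging thread". The sketch below shows the general before/after thread-audit idea with plain JDK APIs; it is not the ResourceChecker implementation, and the names and report format are illustrative:

    import java.util.Map;
    import java.util.Set;
    import java.util.stream.Collectors;

    public final class ThreadAuditExample {
      // Take a snapshot of live thread names before the test runs.
      public static Set<String> snapshotThreadNames() {
        return Thread.getAllStackTraces().keySet().stream()
            .map(Thread::getName)
            .collect(Collectors.toSet());
      }

      // After the test, report threads that did not exist in the "before" snapshot.
      public static void reportLeaked(Set<String> before) {
        Map<Thread, StackTraceElement[]> now = Thread.getAllStackTraces();
        System.out.printf("Thread=%d (was %d)%n", now.size(), before.size());
        now.forEach((t, stack) -> {
          if (!before.contains(t.getName())) {
            System.out.println("Potentially hanging thread: " + t.getName());
            for (StackTraceElement frame : stack) {
              System.out.println("    " + frame);
            }
          }
        });
      }
    }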
2024-11-07T12:30:13,672 WARN [BP-1009609649-172.17.0.3-1730982564245 heartbeating to localhost/127.0.0.1:39665 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1009609649-172.17.0.3-1730982564245 (Datanode Uuid ec31049c-c9de-4285-9e6d-5a8b30ce0dd0) service to localhost/127.0.0.1:39665 2024-11-07T12:30:13,672 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:30:13,672 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data7/current/BP-1009609649-172.17.0.3-1730982564245 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:30:13,672 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/cluster_b5a179f0-0487-79d4-9780-44217b3c02dc/data/data8/current/BP-1009609649-172.17.0.3-1730982564245 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:30:13,673 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:30:13,678 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62b96b7c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:30:13,679 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2fd186ec{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:30:13,679 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:30:13,679 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2305029e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:30:13,679 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@42b52d44{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/hadoop.log.dir/,STOPPED} 2024-11-07T12:30:13,687 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-07T12:30:13,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-07T12:30:13,725 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=155 (was 78) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39665 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39665 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007f4988bef800.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:39665 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46863 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39665 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:46863 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39665 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:39665 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39665 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39665 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39665 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007f4988bef800.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39665 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007f4988bef800.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39665 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=436 (was 402) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=75 (was 106), ProcessCount=11 (was 11), AvailableMemoryMB=6577 (was 7016) 2024-11-07T12:30:13,731 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=155, OpenFileDescriptor=436, MaxFileDescriptor=1048576, SystemLoadAverage=75, ProcessCount=11, AvailableMemoryMB=6577 2024-11-07T12:30:13,732 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-07T12:30:13,732 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/hadoop.log.dir so I do NOT create it in target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2 2024-11-07T12:30:13,732 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/05cc32db-d650-fdf6-690f-e8b1e0540b79/hadoop.tmp.dir so I do NOT create it in target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2 2024-11-07T12:30:13,732 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/cluster_901cd26c-7b42-b2c2-e295-f8c68eab58b1, deleteOnExit=true 2024-11-07T12:30:13,732 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-07T12:30:13,732 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/test.cache.data in system properties and HBase conf 2024-11-07T12:30:13,732 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/hadoop.tmp.dir in system properties and HBase conf 2024-11-07T12:30:13,732 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/hadoop.log.dir in system properties and HBase conf 2024-11-07T12:30:13,732 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-07T12:30:13,732 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-07T12:30:13,732 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-07T12:30:13,733 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-07T12:30:13,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:30:13,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:30:13,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-07T12:30:13,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:30:13,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-07T12:30:13,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-07T12:30:13,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:30:13,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:30:13,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-07T12:30:13,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/nfs.dump.dir in system properties and HBase conf 2024-11-07T12:30:13,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/java.io.tmpdir in system properties and HBase conf 2024-11-07T12:30:13,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:30:13,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-07T12:30:13,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-07T12:30:13,746 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:30:13,814 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:30:13,819 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:30:13,820 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:30:13,820 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:30:13,820 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:30:13,821 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:30:13,822 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a3a743f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:30:13,822 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@421a8f73{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:30:13,823 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:13,936 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@53a4c428{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/java.io.tmpdir/jetty-localhost-40987-hadoop-hdfs-3_4_1-tests_jar-_-any-12979331765791775035/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:30:13,937 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@347a2271{HTTP/1.1, (http/1.1)}{localhost:40987} 2024-11-07T12:30:13,937 INFO [Time-limited test {}] server.Server(415): Started @152610ms 2024-11-07T12:30:13,950 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:30:14,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:14,017 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:30:14,021 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:30:14,021 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:30:14,021 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:30:14,022 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:30:14,022 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@354edf1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:30:14,023 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66c0323e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:30:14,137 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@60017892{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/java.io.tmpdir/jetty-localhost-36937-hadoop-hdfs-3_4_1-tests_jar-_-any-16346798739256876169/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:30:14,138 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@fefca8b{HTTP/1.1, (http/1.1)}{localhost:36937} 2024-11-07T12:30:14,138 INFO [Time-limited test {}] server.Server(415): Started @152811ms 2024-11-07T12:30:14,140 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:30:14,171 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:30:14,175 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:30:14,176 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:30:14,176 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:30:14,176 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:30:14,176 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11b4bf4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:30:14,177 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28441b3a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:30:14,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:14,224 WARN [Thread-1192 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/cluster_901cd26c-7b42-b2c2-e295-f8c68eab58b1/data/data1/current/BP-1179130194-172.17.0.3-1730982613764/current, will proceed with Du for space computation calculation, 2024-11-07T12:30:14,224 WARN [Thread-1193 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/cluster_901cd26c-7b42-b2c2-e295-f8c68eab58b1/data/data2/current/BP-1179130194-172.17.0.3-1730982613764/current, will proceed with Du for space computation calculation, 2024-11-07T12:30:14,240 WARN [Thread-1171 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-07T12:30:14,243 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd6a77e27167d4f94 with lease ID 0x84e561ebb1dcf7cb: Processing first storage report for DS-f38ef4b4-466d-4084-84a0-2b69237f20d6 from datanode DatanodeRegistration(127.0.0.1:41465, datanodeUuid=0e35ec97-7186-4c94-b36a-44916e277331, infoPort=45201, infoSecurePort=0, ipcPort=39877, storageInfo=lv=-57;cid=testClusterID;nsid=975546529;c=1730982613764) 2024-11-07T12:30:14,243 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd6a77e27167d4f94 with lease ID 0x84e561ebb1dcf7cb: from storage DS-f38ef4b4-466d-4084-84a0-2b69237f20d6 node DatanodeRegistration(127.0.0.1:41465, datanodeUuid=0e35ec97-7186-4c94-b36a-44916e277331, infoPort=45201, infoSecurePort=0, ipcPort=39877, storageInfo=lv=-57;cid=testClusterID;nsid=975546529;c=1730982613764), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:30:14,243 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd6a77e27167d4f94 with lease ID 0x84e561ebb1dcf7cb: Processing first storage report for DS-96c5feb9-c631-464d-ba92-6f0a3b3c2963 from datanode DatanodeRegistration(127.0.0.1:41465, datanodeUuid=0e35ec97-7186-4c94-b36a-44916e277331, infoPort=45201, infoSecurePort=0, ipcPort=39877, storageInfo=lv=-57;cid=testClusterID;nsid=975546529;c=1730982613764) 2024-11-07T12:30:14,243 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd6a77e27167d4f94 with lease ID 0x84e561ebb1dcf7cb: from storage DS-96c5feb9-c631-464d-ba92-6f0a3b3c2963 node DatanodeRegistration(127.0.0.1:41465, datanodeUuid=0e35ec97-7186-4c94-b36a-44916e277331, infoPort=45201, infoSecurePort=0, ipcPort=39877, storageInfo=lv=-57;cid=testClusterID;nsid=975546529;c=1730982613764), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:30:14,293 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@6fab6db5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/java.io.tmpdir/jetty-localhost-45387-hadoop-hdfs-3_4_1-tests_jar-_-any-7559164727079187623/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:30:14,294 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43a454f0{HTTP/1.1, (http/1.1)}{localhost:45387} 2024-11-07T12:30:14,294 INFO [Time-limited test {}] server.Server(415): Started @152966ms 2024-11-07T12:30:14,295 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:30:14,373 WARN [Thread-1218 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/cluster_901cd26c-7b42-b2c2-e295-f8c68eab58b1/data/data3/current/BP-1179130194-172.17.0.3-1730982613764/current, will proceed with Du for space computation calculation, 2024-11-07T12:30:14,373 WARN [Thread-1219 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/cluster_901cd26c-7b42-b2c2-e295-f8c68eab58b1/data/data4/current/BP-1179130194-172.17.0.3-1730982613764/current, will proceed with Du for space computation calculation, 2024-11-07T12:30:14,388 WARN [Thread-1207 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:30:14,390 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x766cb06f793426d9 with lease ID 0x84e561ebb1dcf7cc: Processing first storage report for DS-436dedf8-cbd1-4a37-8b67-0255db24e0fe from datanode DatanodeRegistration(127.0.0.1:35641, datanodeUuid=2f76db2f-1739-4f70-930b-40cb8a44a362, infoPort=34555, infoSecurePort=0, ipcPort=40149, storageInfo=lv=-57;cid=testClusterID;nsid=975546529;c=1730982613764) 2024-11-07T12:30:14,390 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x766cb06f793426d9 with lease ID 0x84e561ebb1dcf7cc: from storage DS-436dedf8-cbd1-4a37-8b67-0255db24e0fe node DatanodeRegistration(127.0.0.1:35641, datanodeUuid=2f76db2f-1739-4f70-930b-40cb8a44a362, infoPort=34555, infoSecurePort=0, ipcPort=40149, storageInfo=lv=-57;cid=testClusterID;nsid=975546529;c=1730982613764), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:30:14,391 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x766cb06f793426d9 with lease ID 0x84e561ebb1dcf7cc: Processing first storage report for DS-f9472b55-b87e-4e87-bb60-cfb8b6c4da2f from datanode DatanodeRegistration(127.0.0.1:35641, datanodeUuid=2f76db2f-1739-4f70-930b-40cb8a44a362, infoPort=34555, infoSecurePort=0, ipcPort=40149, storageInfo=lv=-57;cid=testClusterID;nsid=975546529;c=1730982613764) 2024-11-07T12:30:14,391 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x766cb06f793426d9 with lease ID 0x84e561ebb1dcf7cc: from storage DS-f9472b55-b87e-4e87-bb60-cfb8b6c4da2f node DatanodeRegistration(127.0.0.1:35641, datanodeUuid=2f76db2f-1739-4f70-930b-40cb8a44a362, infoPort=34555, infoSecurePort=0, ipcPort=40149, storageInfo=lv=-57;cid=testClusterID;nsid=975546529;c=1730982613764), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:30:14,419 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2 2024-11-07T12:30:14,421 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/cluster_901cd26c-7b42-b2c2-e295-f8c68eab58b1/zookeeper_0, clientPort=61811, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/cluster_901cd26c-7b42-b2c2-e295-f8c68eab58b1/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/cluster_901cd26c-7b42-b2c2-e295-f8c68eab58b1/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-07T12:30:14,422 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61811 2024-11-07T12:30:14,422 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:30:14,424 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:30:14,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35641 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:30:14,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41465 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:30:14,433 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699 with version=8 2024-11-07T12:30:14,434 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/hbase-staging 2024-11-07T12:30:14,436 INFO [Time-limited test {}] client.ConnectionUtils(128): master/87fb68174c52:0 server-side Connection retries=45 2024-11-07T12:30:14,436 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:30:14,436 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:30:14,436 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:30:14,436 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:30:14,436 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:30:14,436 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-07T12:30:14,436 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:30:14,437 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:36547 2024-11-07T12:30:14,438 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36547 connecting to ZooKeeper ensemble=127.0.0.1:61811 2024-11-07T12:30:14,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:365470x0, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:30:14,443 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36547-0x1001a38a0330000 connected 2024-11-07T12:30:14,460 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:30:14,461 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:30:14,463 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:30:14,463 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699, hbase.cluster.distributed=false 2024-11-07T12:30:14,465 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:30:14,468 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36547 2024-11-07T12:30:14,468 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36547 2024-11-07T12:30:14,468 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36547 2024-11-07T12:30:14,469 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36547 2024-11-07T12:30:14,469 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36547 2024-11-07T12:30:14,484 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/87fb68174c52:0 server-side Connection retries=45 2024-11-07T12:30:14,484 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:30:14,484 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:30:14,484 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:30:14,484 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:30:14,485 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:30:14,485 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T12:30:14,485 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:30:14,485 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33225 2024-11-07T12:30:14,487 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33225 connecting to ZooKeeper ensemble=127.0.0.1:61811 2024-11-07T12:30:14,487 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:30:14,489 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:30:14,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:332250x0, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:30:14,493 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:332250x0, quorum=127.0.0.1:61811, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:30:14,493 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33225-0x1001a38a0330001 connected 2024-11-07T12:30:14,493 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-07T12:30:14,494 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-07T12:30:14,495 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33225-0x1001a38a0330001, quorum=127.0.0.1:61811, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T12:30:14,496 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33225-0x1001a38a0330001, quorum=127.0.0.1:61811, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:30:14,496 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33225 2024-11-07T12:30:14,496 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33225 2024-11-07T12:30:14,497 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33225 2024-11-07T12:30:14,497 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33225 2024-11-07T12:30:14,497 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33225 2024-11-07T12:30:14,515 DEBUG [M:0;87fb68174c52:36547 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;87fb68174c52:36547 2024-11-07T12:30:14,515 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/87fb68174c52,36547,1730982614435 2024-11-07T12:30:14,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33225-0x1001a38a0330001, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:30:14,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:30:14,518 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/87fb68174c52,36547,1730982614435 2024-11-07T12:30:14,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33225-0x1001a38a0330001, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T12:30:14,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:14,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33225-0x1001a38a0330001, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:14,520 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T12:30:14,520 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/87fb68174c52,36547,1730982614435 from backup master directory 2024-11-07T12:30:14,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/87fb68174c52,36547,1730982614435 2024-11-07T12:30:14,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:30:14,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33225-0x1001a38a0330001, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:30:14,522 WARN [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-07T12:30:14,522 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=87fb68174c52,36547,1730982614435 2024-11-07T12:30:14,526 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/hbase.id] with ID: 51c96f3c-1de7-4c3f-8c6d-79397f9d9be7 2024-11-07T12:30:14,526 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/.tmp/hbase.id 2024-11-07T12:30:14,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41465 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:30:14,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35641 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:30:14,534 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/.tmp/hbase.id]:[hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/hbase.id] 2024-11-07T12:30:14,545 INFO [master/87fb68174c52:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:30:14,545 INFO [master/87fb68174c52:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-07T12:30:14,547 INFO [master/87fb68174c52:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-07T12:30:14,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33225-0x1001a38a0330001, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:14,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:14,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41465 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:30:14,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35641 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:30:14,558 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T12:30:14,558 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-07T12:30:14,559 INFO [master/87fb68174c52:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:30:14,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41465 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:30:14,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35641 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:30:14,567 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store 2024-11-07T12:30:14,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35641 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:30:14,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41465 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:30:14,574 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:30:14,574 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:30:14,574 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:30:14,574 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:30:14,574 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:30:14,574 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:30:14,574 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-07T12:30:14,574 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730982614574Disabling compacts and flushes for region at 1730982614574Disabling writes for close at 1730982614574Writing region close event to WAL at 1730982614574Closed at 1730982614574 2024-11-07T12:30:14,575 WARN [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store/.initializing 2024-11-07T12:30:14,575 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/WALs/87fb68174c52,36547,1730982614435 2024-11-07T12:30:14,578 INFO [master/87fb68174c52:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87fb68174c52%2C36547%2C1730982614435, suffix=, logDir=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/WALs/87fb68174c52,36547,1730982614435, archiveDir=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/oldWALs, maxLogs=10 2024-11-07T12:30:14,579 INFO [master/87fb68174c52:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C36547%2C1730982614435.1730982614578 2024-11-07T12:30:14,583 INFO [master/87fb68174c52:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/WALs/87fb68174c52,36547,1730982614435/87fb68174c52%2C36547%2C1730982614435.1730982614578 2024-11-07T12:30:14,584 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45201:45201),(127.0.0.1/127.0.0.1:34555:34555)] 2024-11-07T12:30:14,587 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:30:14,587 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:30:14,587 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:30:14,587 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:30:14,591 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:30:14,592 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-07T12:30:14,592 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:14,593 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:30:14,593 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:30:14,594 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-07T12:30:14,594 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:14,594 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:30:14,594 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:30:14,595 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-07T12:30:14,595 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:14,596 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:30:14,596 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:30:14,597 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-07T12:30:14,597 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:14,597 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:30:14,598 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:30:14,598 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:30:14,598 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:30:14,600 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:30:14,600 DEBUG [master/87fb68174c52:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:30:14,600 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-07T12:30:14,601 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:30:14,603 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:30:14,603 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=780252, jitterRate=-0.007859036326408386}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-07T12:30:14,604 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1730982614588Initializing all the Stores at 1730982614589 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982614589Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982614591 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982614591Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982614591Cleaning up temporary data from old regions at 1730982614600 (+9 ms)Region opened successfully at 1730982614604 (+4 ms) 2024-11-07T12:30:14,604 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-07T12:30:14,607 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1db98835, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87fb68174c52/172.17.0.3:0 2024-11-07T12:30:14,608 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-07T12:30:14,608 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-07T12:30:14,608 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-07T12:30:14,608 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-07T12:30:14,609 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-07T12:30:14,609 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-07T12:30:14,609 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-07T12:30:14,612 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-07T12:30:14,613 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-07T12:30:14,614 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-07T12:30:14,615 INFO [master/87fb68174c52:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-07T12:30:14,616 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-07T12:30:14,617 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-07T12:30:14,618 INFO [master/87fb68174c52:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-07T12:30:14,619 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-07T12:30:14,621 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-07T12:30:14,622 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-07T12:30:14,623 DEBUG 
[master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-07T12:30:14,625 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-07T12:30:14,626 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-07T12:30:14,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33225-0x1001a38a0330001, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:30:14,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:30:14,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33225-0x1001a38a0330001, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:14,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:14,629 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=87fb68174c52,36547,1730982614435, sessionid=0x1001a38a0330000, setting cluster-up flag (Was=false) 2024-11-07T12:30:14,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33225-0x1001a38a0330001, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:14,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:14,637 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-07T12:30:14,638 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87fb68174c52,36547,1730982614435 2024-11-07T12:30:14,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33225-0x1001a38a0330001, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:14,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:14,648 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-07T12:30:14,649 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87fb68174c52,36547,1730982614435 2024-11-07T12:30:14,650 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-07T12:30:14,651 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-07T12:30:14,652 INFO [master/87fb68174c52:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-07T12:30:14,652 INFO [master/87fb68174c52:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-07T12:30:14,652 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 87fb68174c52,36547,1730982614435 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-07T12:30:14,653 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:30:14,653 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:30:14,653 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:30:14,653 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:30:14,653 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/87fb68174c52:0, corePoolSize=10, maxPoolSize=10 2024-11-07T12:30:14,653 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:30:14,653 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/87fb68174c52:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:30:14,653 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/87fb68174c52:0, corePoolSize=1, 
maxPoolSize=1 2024-11-07T12:30:14,655 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:30:14,655 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-07T12:30:14,656 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:14,656 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1730982644656 2024-11-07T12:30:14,656 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-07T12:30:14,656 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-07T12:30:14,656 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-07T12:30:14,656 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-07T12:30:14,657 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-07T12:30:14,657 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-07T12:30:14,656 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-07T12:30:14,660 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:14,661 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-07T12:30:14,661 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-07T12:30:14,661 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-07T12:30:14,661 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-07T12:30:14,661 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-07T12:30:14,661 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.large.0-1730982614661,5,FailOnTimeoutGroup] 2024-11-07T12:30:14,662 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.small.0-1730982614661,5,FailOnTimeoutGroup] 2024-11-07T12:30:14,662 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:14,662 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-07T12:30:14,662 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:14,662 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-07T12:30:14,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41465 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:30:14,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35641 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:30:14,666 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-07T12:30:14,666 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699 2024-11-07T12:30:14,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41465 is added to blk_1073741832_1008 (size=32) 2024-11-07T12:30:14,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35641 is added to blk_1073741832_1008 (size=32) 2024-11-07T12:30:14,674 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:30:14,676 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:30:14,677 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:30:14,677 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:14,678 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:30:14,678 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:30:14,680 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:30:14,680 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:14,680 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:30:14,680 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:30:14,681 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:30:14,681 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:14,682 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:30:14,682 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:30:14,683 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:30:14,683 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:14,684 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:30:14,684 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:30:14,684 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/hbase/meta/1588230740 2024-11-07T12:30:14,685 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/hbase/meta/1588230740 2024-11-07T12:30:14,686 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:30:14,686 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:30:14,686 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-07T12:30:14,687 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:30:14,689 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:30:14,690 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=876534, jitterRate=0.11457134783267975}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:30:14,690 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1730982614675Initializing all the Stores at 1730982614675Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982614675Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982614676 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982614676Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982614676Cleaning up temporary data from old regions at 1730982614686 (+10 ms)Region opened successfully at 1730982614690 (+4 ms) 2024-11-07T12:30:14,690 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:30:14,690 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:30:14,690 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:30:14,690 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:30:14,690 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:30:14,691 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:30:14,691 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730982614690Disabling compacts and flushes for region at 1730982614690Disabling writes for close at 1730982614690Writing region close 
event to WAL at 1730982614691 (+1 ms)Closed at 1730982614691 2024-11-07T12:30:14,692 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:30:14,692 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-07T12:30:14,692 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-07T12:30:14,694 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:30:14,695 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-07T12:30:14,699 INFO [RS:0;87fb68174c52:33225 {}] regionserver.HRegionServer(746): ClusterId : 51c96f3c-1de7-4c3f-8c6d-79397f9d9be7 2024-11-07T12:30:14,699 DEBUG [RS:0;87fb68174c52:33225 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-07T12:30:14,702 DEBUG [RS:0;87fb68174c52:33225 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-07T12:30:14,702 DEBUG [RS:0;87fb68174c52:33225 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-07T12:30:14,704 DEBUG [RS:0;87fb68174c52:33225 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-07T12:30:14,705 DEBUG [RS:0;87fb68174c52:33225 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c63e1fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87fb68174c52/172.17.0.3:0 2024-11-07T12:30:14,716 DEBUG [RS:0;87fb68174c52:33225 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;87fb68174c52:33225 2024-11-07T12:30:14,716 INFO [RS:0;87fb68174c52:33225 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-07T12:30:14,716 INFO [RS:0;87fb68174c52:33225 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-07T12:30:14,716 DEBUG [RS:0;87fb68174c52:33225 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-07T12:30:14,717 INFO [RS:0;87fb68174c52:33225 {}] regionserver.HRegionServer(2659): reportForDuty to master=87fb68174c52,36547,1730982614435 with port=33225, startcode=1730982614484 2024-11-07T12:30:14,717 DEBUG [RS:0;87fb68174c52:33225 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-07T12:30:14,719 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58371, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-07T12:30:14,719 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36547 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 87fb68174c52,33225,1730982614484 2024-11-07T12:30:14,720 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36547 {}] master.ServerManager(517): Registering regionserver=87fb68174c52,33225,1730982614484 2024-11-07T12:30:14,721 DEBUG [RS:0;87fb68174c52:33225 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699 2024-11-07T12:30:14,721 DEBUG [RS:0;87fb68174c52:33225 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38891 2024-11-07T12:30:14,721 DEBUG [RS:0;87fb68174c52:33225 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-07T12:30:14,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:30:14,724 DEBUG [RS:0;87fb68174c52:33225 {}] zookeeper.ZKUtil(111): regionserver:33225-0x1001a38a0330001, quorum=127.0.0.1:61811, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/87fb68174c52,33225,1730982614484 2024-11-07T12:30:14,724 WARN [RS:0;87fb68174c52:33225 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-07T12:30:14,724 INFO [RS:0;87fb68174c52:33225 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:30:14,724 DEBUG [RS:0;87fb68174c52:33225 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484 2024-11-07T12:30:14,724 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [87fb68174c52,33225,1730982614484] 2024-11-07T12:30:14,727 INFO [RS:0;87fb68174c52:33225 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-07T12:30:14,729 INFO [RS:0;87fb68174c52:33225 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-07T12:30:14,730 INFO [RS:0;87fb68174c52:33225 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-07T12:30:14,730 INFO [RS:0;87fb68174c52:33225 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-07T12:30:14,730 INFO [RS:0;87fb68174c52:33225 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-07T12:30:14,731 INFO [RS:0;87fb68174c52:33225 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-07T12:30:14,731 INFO [RS:0;87fb68174c52:33225 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:14,731 DEBUG [RS:0;87fb68174c52:33225 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:30:14,731 DEBUG [RS:0;87fb68174c52:33225 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:30:14,731 DEBUG [RS:0;87fb68174c52:33225 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:30:14,731 DEBUG [RS:0;87fb68174c52:33225 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:30:14,731 DEBUG [RS:0;87fb68174c52:33225 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:30:14,731 DEBUG [RS:0;87fb68174c52:33225 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/87fb68174c52:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:30:14,731 DEBUG [RS:0;87fb68174c52:33225 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:30:14,731 DEBUG [RS:0;87fb68174c52:33225 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:30:14,731 DEBUG [RS:0;87fb68174c52:33225 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:30:14,731 DEBUG [RS:0;87fb68174c52:33225 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:30:14,731 DEBUG [RS:0;87fb68174c52:33225 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:30:14,731 DEBUG [RS:0;87fb68174c52:33225 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:30:14,731 DEBUG [RS:0;87fb68174c52:33225 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/87fb68174c52:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:30:14,731 DEBUG [RS:0;87fb68174c52:33225 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:30:14,732 INFO [RS:0;87fb68174c52:33225 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-07T12:30:14,732 INFO [RS:0;87fb68174c52:33225 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:14,732 INFO [RS:0;87fb68174c52:33225 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:14,732 INFO [RS:0;87fb68174c52:33225 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:14,732 INFO [RS:0;87fb68174c52:33225 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:14,732 INFO [RS:0;87fb68174c52:33225 {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,33225,1730982614484-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:30:14,747 INFO [RS:0;87fb68174c52:33225 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-07T12:30:14,747 INFO [RS:0;87fb68174c52:33225 {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,33225,1730982614484-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:14,747 INFO [RS:0;87fb68174c52:33225 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:14,747 INFO [RS:0;87fb68174c52:33225 {}] regionserver.Replication(171): 87fb68174c52,33225,1730982614484 started 2024-11-07T12:30:14,762 INFO [RS:0;87fb68174c52:33225 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:14,762 INFO [RS:0;87fb68174c52:33225 {}] regionserver.HRegionServer(1482): Serving as 87fb68174c52,33225,1730982614484, RpcServer on 87fb68174c52/172.17.0.3:33225, sessionid=0x1001a38a0330001 2024-11-07T12:30:14,762 DEBUG [RS:0;87fb68174c52:33225 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-07T12:30:14,762 DEBUG [RS:0;87fb68174c52:33225 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 87fb68174c52,33225,1730982614484 2024-11-07T12:30:14,762 DEBUG [RS:0;87fb68174c52:33225 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87fb68174c52,33225,1730982614484' 2024-11-07T12:30:14,762 DEBUG [RS:0;87fb68174c52:33225 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-07T12:30:14,762 DEBUG [RS:0;87fb68174c52:33225 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-07T12:30:14,763 DEBUG [RS:0;87fb68174c52:33225 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-07T12:30:14,763 DEBUG [RS:0;87fb68174c52:33225 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-07T12:30:14,763 DEBUG [RS:0;87fb68174c52:33225 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 87fb68174c52,33225,1730982614484 2024-11-07T12:30:14,763 DEBUG [RS:0;87fb68174c52:33225 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87fb68174c52,33225,1730982614484' 2024-11-07T12:30:14,763 DEBUG [RS:0;87fb68174c52:33225 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-07T12:30:14,763 DEBUG 
[RS:0;87fb68174c52:33225 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-07T12:30:14,764 DEBUG [RS:0;87fb68174c52:33225 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-07T12:30:14,764 INFO [RS:0;87fb68174c52:33225 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-07T12:30:14,764 INFO [RS:0;87fb68174c52:33225 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-07T12:30:14,823 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-07T12:30:14,845 WARN [87fb68174c52:36547 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions.
2024-11-07T12:30:14,866 INFO [RS:0;87fb68174c52:33225 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87fb68174c52%2C33225%2C1730982614484, suffix=, logDir=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484, archiveDir=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/oldWALs, maxLogs=32
2024-11-07T12:30:14,866 INFO [RS:0;87fb68174c52:33225 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C33225%2C1730982614484.1730982614866
2024-11-07T12:30:14,872 INFO [RS:0;87fb68174c52:33225 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982614866
2024-11-07T12:30:14,873 DEBUG [RS:0;87fb68174c52:33225 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34555:34555),(127.0.0.1/127.0.0.1:45201:45201)]
2024-11-07T12:30:15,011 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-07T12:30:15,095 DEBUG [87fb68174c52:36547 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-11-07T12:30:15,096 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=87fb68174c52,33225,1730982614484
2024-11-07T12:30:15,097 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87fb68174c52,33225,1730982614484, state=OPENING
2024-11-07T12:30:15,099 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-11-07T12:30:15,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:30:15,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33225-0x1001a38a0330001, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:30:15,100 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-07T12:30:15,101 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=87fb68174c52,33225,1730982614484}]
2024-11-07T12:30:15,101 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-07T12:30:15,101 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-07T12:30:15,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-07T12:30:15,254 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-07T12:30:15,255 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56959, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-07T12:30:15,259 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-11-07T12:30:15,259 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-07T12:30:15,261 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87fb68174c52%2C33225%2C1730982614484.meta, suffix=.meta, logDir=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484, archiveDir=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/oldWALs, maxLogs=32
2024-11-07T12:30:15,261 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C33225%2C1730982614484.meta.1730982615261.meta
2024-11-07T12:30:15,266 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.meta.1730982615261.meta
2024-11-07T12:30:15,267 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45201:45201),(127.0.0.1/127.0.0.1:34555:34555)]
2024-11-07T12:30:15,267 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-11-07T12:30:15,268 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-11-07T12:30:15,268 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-11-07T12:30:15,268
INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-07T12:30:15,268 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-07T12:30:15,268 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:30:15,268 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-07T12:30:15,268 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-07T12:30:15,269 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:30:15,270 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:30:15,270 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:15,271 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:30:15,271 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:30:15,272 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:30:15,272 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:15,272 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:30:15,272 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:30:15,273 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:30:15,273 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:15,273 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:30:15,273 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:30:15,274 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:30:15,274 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:15,274 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:30:15,275 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:30:15,275 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/hbase/meta/1588230740 2024-11-07T12:30:15,277 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/hbase/meta/1588230740 2024-11-07T12:30:15,278 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:30:15,278 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:30:15,279 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T12:30:15,280 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:30:15,281 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=722228, jitterRate=-0.0816403180360794}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:30:15,281 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-07T12:30:15,282 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1730982615268Writing region info on filesystem at 1730982615268Initializing all the Stores at 1730982615269 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982615269Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982615269Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 
'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982615269Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982615269Cleaning up temporary data from old regions at 1730982615278 (+9 ms)Running coprocessor post-open hooks at 1730982615281 (+3 ms)Region opened successfully at 1730982615282 (+1 ms) 2024-11-07T12:30:15,283 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1730982615253 2024-11-07T12:30:15,285 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-07T12:30:15,285 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-07T12:30:15,286 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=87fb68174c52,33225,1730982614484 2024-11-07T12:30:15,287 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87fb68174c52,33225,1730982614484, state=OPEN 2024-11-07T12:30:15,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33225-0x1001a38a0330001, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:30:15,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:30:15,291 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=87fb68174c52,33225,1730982614484 2024-11-07T12:30:15,292 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:30:15,292 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:30:15,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-07T12:30:15,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=87fb68174c52,33225,1730982614484 in 190 msec 2024-11-07T12:30:15,297 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-07T12:30:15,297 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 602 msec 2024-11-07T12:30:15,298 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:30:15,298 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-07T12:30:15,299 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:30:15,299 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87fb68174c52,33225,1730982614484, seqNum=-1] 2024-11-07T12:30:15,300 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:30:15,301 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47703, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:30:15,306 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 654 msec 2024-11-07T12:30:15,306 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1730982615306, completionTime=-1 2024-11-07T12:30:15,306 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-07T12:30:15,306 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-07T12:30:15,308 INFO [master/87fb68174c52:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-07T12:30:15,308 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1730982675308 2024-11-07T12:30:15,308 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1730982735308 2024-11-07T12:30:15,308 INFO [master/87fb68174c52:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-07T12:30:15,309 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,36547,1730982614435-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:15,309 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,36547,1730982614435-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:15,309 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,36547,1730982614435-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-07T12:30:15,309 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-87fb68174c52:36547, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:15,309 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:15,309 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:15,310 DEBUG [master/87fb68174c52:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-07T12:30:15,313 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.791sec 2024-11-07T12:30:15,313 INFO [master/87fb68174c52:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-07T12:30:15,313 INFO [master/87fb68174c52:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-07T12:30:15,313 INFO [master/87fb68174c52:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-07T12:30:15,313 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-07T12:30:15,313 INFO [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-07T12:30:15,313 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,36547,1730982614435-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:30:15,313 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,36547,1730982614435-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-07T12:30:15,315 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-07T12:30:15,315 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-07T12:30:15,315 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,36547,1730982614435-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-07T12:30:15,399 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77c83f30, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:30:15,400 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 87fb68174c52,36547,-1 for getting cluster id 2024-11-07T12:30:15,400 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-07T12:30:15,401 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '51c96f3c-1de7-4c3f-8c6d-79397f9d9be7' 2024-11-07T12:30:15,402 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-07T12:30:15,402 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "51c96f3c-1de7-4c3f-8c6d-79397f9d9be7" 2024-11-07T12:30:15,402 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5debbeb6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:30:15,402 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [87fb68174c52,36547,-1] 2024-11-07T12:30:15,402 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-07T12:30:15,403 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:30:15,404 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45230, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-07T12:30:15,405 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c10cfb7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:30:15,405 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:30:15,406 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87fb68174c52,33225,1730982614484, seqNum=-1] 2024-11-07T12:30:15,406 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:30:15,408 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53930, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:30:15,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=87fb68174c52,36547,1730982614435 2024-11-07T12:30:15,410 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:30:15,413 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-07T12:30:15,413 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-07T12:30:15,413 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-07T12:30:15,413 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-07T12:30:15,414 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 87fb68174c52,36547,1730982614435 2024-11-07T12:30:15,414 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2161d60a 2024-11-07T12:30:15,414 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T12:30:15,415 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45246, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T12:30:15,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36547 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-07T12:30:15,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36547 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-07T12:30:15,416 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36547 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T12:30:15,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36547 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-07T12:30:15,419 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T12:30:15,419 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:15,419 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36547 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-07T12:30:15,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36547 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-07T12:30:15,420 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T12:30:15,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35641 is added to blk_1073741835_1011 (size=395) 2024-11-07T12:30:15,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41465 is added to blk_1073741835_1011 (size=395) 2024-11-07T12:30:15,429 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7733a6da8319cf9c842f4bb43e32090e, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1730982615416.7733a6da8319cf9c842f4bb43e32090e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699 2024-11-07T12:30:15,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35641 is added to blk_1073741836_1012 (size=78) 2024-11-07T12:30:15,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41465 is added to blk_1073741836_1012 (size=78) 2024-11-07T12:30:15,438 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1730982615416.7733a6da8319cf9c842f4bb43e32090e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:30:15,438 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 7733a6da8319cf9c842f4bb43e32090e, disabling compactions & flushes 2024-11-07T12:30:15,438 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1730982615416.7733a6da8319cf9c842f4bb43e32090e. 2024-11-07T12:30:15,438 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1730982615416.7733a6da8319cf9c842f4bb43e32090e. 2024-11-07T12:30:15,438 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1730982615416.7733a6da8319cf9c842f4bb43e32090e. after waiting 0 ms 2024-11-07T12:30:15,438 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1730982615416.7733a6da8319cf9c842f4bb43e32090e. 2024-11-07T12:30:15,438 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1730982615416.7733a6da8319cf9c842f4bb43e32090e. 2024-11-07T12:30:15,438 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7733a6da8319cf9c842f4bb43e32090e: Waiting for close lock at 1730982615438Disabling compacts and flushes for region at 1730982615438Disabling writes for close at 1730982615438Writing region close event to WAL at 1730982615438Closed at 1730982615438 2024-11-07T12:30:15,439 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T12:30:15,440 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1730982615416.7733a6da8319cf9c842f4bb43e32090e.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1730982615440"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730982615440"}]},"ts":"1730982615440"} 2024-11-07T12:30:15,442 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-07T12:30:15,443 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T12:30:15,444 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730982615443"}]},"ts":"1730982615443"} 2024-11-07T12:30:15,445 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-07T12:30:15,446 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7733a6da8319cf9c842f4bb43e32090e, ASSIGN}] 2024-11-07T12:30:15,447 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7733a6da8319cf9c842f4bb43e32090e, ASSIGN 2024-11-07T12:30:15,448 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7733a6da8319cf9c842f4bb43e32090e, ASSIGN; state=OFFLINE, location=87fb68174c52,33225,1730982614484; forceNewPlan=false, retain=false 2024-11-07T12:30:15,599 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7733a6da8319cf9c842f4bb43e32090e, regionState=OPENING, regionLocation=87fb68174c52,33225,1730982614484 2024-11-07T12:30:15,601 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7733a6da8319cf9c842f4bb43e32090e, ASSIGN because future has completed 2024-11-07T12:30:15,602 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7733a6da8319cf9c842f4bb43e32090e, server=87fb68174c52,33225,1730982614484}] 2024-11-07T12:30:15,758 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1730982615416.7733a6da8319cf9c842f4bb43e32090e. 
2024-11-07T12:30:15,758 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 7733a6da8319cf9c842f4bb43e32090e, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1730982615416.7733a6da8319cf9c842f4bb43e32090e.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:30:15,759 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 7733a6da8319cf9c842f4bb43e32090e 2024-11-07T12:30:15,759 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1730982615416.7733a6da8319cf9c842f4bb43e32090e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:30:15,759 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 7733a6da8319cf9c842f4bb43e32090e 2024-11-07T12:30:15,759 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 7733a6da8319cf9c842f4bb43e32090e 2024-11-07T12:30:15,760 INFO [StoreOpener-7733a6da8319cf9c842f4bb43e32090e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 7733a6da8319cf9c842f4bb43e32090e 2024-11-07T12:30:15,762 INFO [StoreOpener-7733a6da8319cf9c842f4bb43e32090e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7733a6da8319cf9c842f4bb43e32090e columnFamilyName info 2024-11-07T12:30:15,762 DEBUG [StoreOpener-7733a6da8319cf9c842f4bb43e32090e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:15,762 INFO [StoreOpener-7733a6da8319cf9c842f4bb43e32090e-1 {}] regionserver.HStore(327): Store=7733a6da8319cf9c842f4bb43e32090e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:30:15,762 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 7733a6da8319cf9c842f4bb43e32090e 2024-11-07T12:30:15,763 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/default/TestLogRolling-testLogRollOnPipelineRestart/7733a6da8319cf9c842f4bb43e32090e 2024-11-07T12:30:15,763 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/default/TestLogRolling-testLogRollOnPipelineRestart/7733a6da8319cf9c842f4bb43e32090e 2024-11-07T12:30:15,764 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 7733a6da8319cf9c842f4bb43e32090e 2024-11-07T12:30:15,764 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 7733a6da8319cf9c842f4bb43e32090e 2024-11-07T12:30:15,765 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 7733a6da8319cf9c842f4bb43e32090e 2024-11-07T12:30:15,767 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/default/TestLogRolling-testLogRollOnPipelineRestart/7733a6da8319cf9c842f4bb43e32090e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:30:15,768 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 7733a6da8319cf9c842f4bb43e32090e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=850438, jitterRate=0.08138798177242279}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-07T12:30:15,768 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7733a6da8319cf9c842f4bb43e32090e 2024-11-07T12:30:15,768 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 7733a6da8319cf9c842f4bb43e32090e: Running coprocessor pre-open hook at 1730982615759Writing region info on filesystem at 1730982615759Initializing all the Stores at 1730982615760 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982615760Cleaning up temporary data from old regions at 1730982615764 (+4 ms)Running coprocessor post-open hooks at 1730982615768 (+4 ms)Region opened successfully at 1730982615768 2024-11-07T12:30:15,770 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1730982615416.7733a6da8319cf9c842f4bb43e32090e., pid=6, masterSystemTime=1730982615754 2024-11-07T12:30:15,772 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1730982615416.7733a6da8319cf9c842f4bb43e32090e. 2024-11-07T12:30:15,772 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1730982615416.7733a6da8319cf9c842f4bb43e32090e. 2024-11-07T12:30:15,773 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7733a6da8319cf9c842f4bb43e32090e, regionState=OPEN, openSeqNum=2, regionLocation=87fb68174c52,33225,1730982614484 2024-11-07T12:30:15,775 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7733a6da8319cf9c842f4bb43e32090e, server=87fb68174c52,33225,1730982614484 because future has completed 2024-11-07T12:30:15,779 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-07T12:30:15,779 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 7733a6da8319cf9c842f4bb43e32090e, server=87fb68174c52,33225,1730982614484 in 175 msec 2024-11-07T12:30:15,782 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-07T12:30:15,782 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7733a6da8319cf9c842f4bb43e32090e, ASSIGN in 333 msec 2024-11-07T12:30:15,783 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T12:30:15,783 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730982615783"}]},"ts":"1730982615783"} 2024-11-07T12:30:15,785 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-07T12:30:15,786 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T12:30:15,788 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 370 msec 2024-11-07T12:30:15,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:16,011 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:16,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:30:16,517 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-07T12:30:16,517 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-07T12:30:16,518 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-07T12:30:16,518 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-07T12:30:16,519 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T12:30:16,519 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-07T12:30:16,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:17,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:17,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:17,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:18,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:18,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:18,826 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:19,013 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:19,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:19,826 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:20,013 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:20,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:20,280 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:20,280 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:20,280 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:20,281 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:20,281 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:20,281 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:20,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:20,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:20,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:20,286 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:20,790 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-07T12:30:20,805 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:20,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:20,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:20,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:20,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:20,807 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:20,809 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:20,809 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:20,810 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:20,812 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:20,817 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-07T12:30:20,817 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-07T12:30:20,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:21,014 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:21,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:21,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:22,015 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:22,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:22,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:23,015 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:23,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:23,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:24,016 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:24,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:24,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:25,016 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:25,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:30:25,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36547 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-07T12:30:25,438 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-07T12:30:25,438 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-07T12:30:25,441 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-07T12:30:25,441 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1730982615416.7733a6da8319cf9c842f4bb43e32090e. 2024-11-07T12:30:25,444 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1730982615416.7733a6da8319cf9c842f4bb43e32090e., hostname=87fb68174c52,33225,1730982614484, seqNum=2] 2024-11-07T12:30:25,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:26,017 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:26,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:26,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:27,018 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:27,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:27,447 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982614866 2024-11-07T12:30:27,448 WARN [ResponseProcessor for block BP-1179130194-172.17.0.3-1730982613764:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1179130194-172.17.0.3-1730982613764:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:27,448 WARN [ResponseProcessor for block BP-1179130194-172.17.0.3-1730982613764:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1179130194-172.17.0.3-1730982613764:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1179130194-172.17.0.3-1730982613764:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:35641,DS-436dedf8-cbd1-4a37-8b67-0255db24e0fe,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:30:27,448 WARN [ResponseProcessor for block BP-1179130194-172.17.0.3-1730982613764:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1179130194-172.17.0.3-1730982613764:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1179130194-172.17.0.3-1730982613764:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:35641,DS-436dedf8-cbd1-4a37-8b67-0255db24e0fe,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:27,448 WARN [DataStreamer for file /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.meta.1730982615261.meta block BP-1179130194-172.17.0.3-1730982613764:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1179130194-172.17.0.3-1730982613764:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41465,DS-f38ef4b4-466d-4084-84a0-2b69237f20d6,DISK], DatanodeInfoWithStorage[127.0.0.1:35641,DS-436dedf8-cbd1-4a37-8b67-0255db24e0fe,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35641,DS-436dedf8-cbd1-4a37-8b67-0255db24e0fe,DISK]) is bad. 2024-11-07T12:30:27,448 WARN [DataStreamer for file /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/WALs/87fb68174c52,36547,1730982614435/87fb68174c52%2C36547%2C1730982614435.1730982614578 block BP-1179130194-172.17.0.3-1730982613764:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1179130194-172.17.0.3-1730982613764:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41465,DS-f38ef4b4-466d-4084-84a0-2b69237f20d6,DISK], DatanodeInfoWithStorage[127.0.0.1:35641,DS-436dedf8-cbd1-4a37-8b67-0255db24e0fe,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35641,DS-436dedf8-cbd1-4a37-8b67-0255db24e0fe,DISK]) is bad. 2024-11-07T12:30:27,448 WARN [DataStreamer for file /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982614866 block BP-1179130194-172.17.0.3-1730982613764:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1179130194-172.17.0.3-1730982613764:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35641,DS-436dedf8-cbd1-4a37-8b67-0255db24e0fe,DISK], DatanodeInfoWithStorage[127.0.0.1:41465,DS-f38ef4b4-466d-4084-84a0-2b69237f20d6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35641,DS-436dedf8-cbd1-4a37-8b67-0255db24e0fe,DISK]) is bad. 2024-11-07T12:30:27,448 WARN [PacketResponder: BP-1179130194-172.17.0.3-1730982613764:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35641] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:30:27,448 WARN [PacketResponder: BP-1179130194-172.17.0.3-1730982613764:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35641] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:30:27,449 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1500621439_22 at /127.0.0.1:38894 [Receiving block BP-1179130194-172.17.0.3-1730982613764:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41465:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38894 dst: /127.0.0.1:41465 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:30:27,449 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1164838792_22 at /127.0.0.1:38938 [Receiving block BP-1179130194-172.17.0.3-1730982613764:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41465:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38938 dst: /127.0.0.1:41465 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:30:27,450 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1164838792_22 at /127.0.0.1:59154 [Receiving block BP-1179130194-172.17.0.3-1730982613764:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35641:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59154 dst: /127.0.0.1:35641 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:30:27,450 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1500621439_22 at /127.0.0.1:59122 [Receiving block BP-1179130194-172.17.0.3-1730982613764:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35641:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59122 dst: /127.0.0.1:35641 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:30:27,449 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1164838792_22 at /127.0.0.1:38930 [Receiving block BP-1179130194-172.17.0.3-1730982613764:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41465:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38930 dst: /127.0.0.1:41465 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:30:27,451 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6fab6db5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:30:27,449 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1164838792_22 at /127.0.0.1:59150 [Receiving block BP-1179130194-172.17.0.3-1730982613764:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35641:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59150 dst: /127.0.0.1:35641 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:30:27,452 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43a454f0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:30:27,452 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:30:27,452 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28441b3a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:30:27,452 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11b4bf4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/hadoop.log.dir/,STOPPED} 2024-11-07T12:30:27,455 WARN [BP-1179130194-172.17.0.3-1730982613764 heartbeating to localhost/127.0.0.1:38891 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:30:27,455 WARN [BP-1179130194-172.17.0.3-1730982613764 heartbeating to localhost/127.0.0.1:38891 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1179130194-172.17.0.3-1730982613764 (Datanode Uuid 2f76db2f-1739-4f70-930b-40cb8a44a362) service to localhost/127.0.0.1:38891 2024-11-07T12:30:27,455 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-07T12:30:27,456 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:30:27,456 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:30:27,456 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/cluster_901cd26c-7b42-b2c2-e295-f8c68eab58b1/data/data4/current/BP-1179130194-172.17.0.3-1730982613764 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:30:27,456 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/cluster_901cd26c-7b42-b2c2-e295-f8c68eab58b1/data/data3/current/BP-1179130194-172.17.0.3-1730982613764 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:30:27,483 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:30:27,486 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:30:27,487 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:30:27,488 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:30:27,488 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:30:27,488 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@235b3635{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:30:27,489 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@431e378c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:30:27,605 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4ad0086e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/java.io.tmpdir/jetty-localhost-39315-hadoop-hdfs-3_4_1-tests_jar-_-any-7533007813593366225/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:30:27,605 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2b5f8885{HTTP/1.1, (http/1.1)}{localhost:39315} 2024-11-07T12:30:27,605 INFO [Time-limited test {}] server.Server(415): Started @166277ms 2024-11-07T12:30:27,607 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-07T12:30:27,626 WARN [ResponseProcessor for block BP-1179130194-172.17.0.3-1730982613764:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1179130194-172.17.0.3-1730982613764:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:27,626 WARN [ResponseProcessor for block BP-1179130194-172.17.0.3-1730982613764:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1179130194-172.17.0.3-1730982613764:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:27,626 WARN [ResponseProcessor for block BP-1179130194-172.17.0.3-1730982613764:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1179130194-172.17.0.3-1730982613764:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:27,627 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1164838792_22 at /127.0.0.1:53038 [Receiving block BP-1179130194-172.17.0.3-1730982613764:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41465:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53038 dst: /127.0.0.1:41465 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:30:27,627 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1164838792_22 at /127.0.0.1:53054 [Receiving block BP-1179130194-172.17.0.3-1730982613764:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41465:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53054 dst: /127.0.0.1:41465 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:30:27,627 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1500621439_22 at /127.0.0.1:53070 [Receiving block BP-1179130194-172.17.0.3-1730982613764:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41465:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53070 dst: /127.0.0.1:41465 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:30:27,630 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@60017892{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:30:27,630 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@fefca8b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:30:27,630 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:30:27,630 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66c0323e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:30:27,631 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@354edf1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/hadoop.log.dir/,STOPPED} 2024-11-07T12:30:27,632 WARN [BP-1179130194-172.17.0.3-1730982613764 heartbeating to localhost/127.0.0.1:38891 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:30:27,632 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-07T12:30:27,632 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:30:27,632 WARN [BP-1179130194-172.17.0.3-1730982613764 heartbeating to localhost/127.0.0.1:38891 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1179130194-172.17.0.3-1730982613764 (Datanode Uuid 0e35ec97-7186-4c94-b36a-44916e277331) service to localhost/127.0.0.1:38891 2024-11-07T12:30:27,632 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/cluster_901cd26c-7b42-b2c2-e295-f8c68eab58b1/data/data1/current/BP-1179130194-172.17.0.3-1730982613764 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:30:27,632 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/cluster_901cd26c-7b42-b2c2-e295-f8c68eab58b1/data/data2/current/BP-1179130194-172.17.0.3-1730982613764 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:30:27,633 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:30:27,640 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:30:27,644 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:30:27,645 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:30:27,645 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:30:27,645 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:30:27,645 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@673d3bba{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:30:27,646 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62f6e774{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:30:27,706 WARN [Thread-1342 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-07T12:30:27,709 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xffe074a96318cac9 with lease ID 0x84e561ebb1dcf7cd: from storage DS-436dedf8-cbd1-4a37-8b67-0255db24e0fe node DatanodeRegistration(127.0.0.1:42711, datanodeUuid=2f76db2f-1739-4f70-930b-40cb8a44a362, infoPort=42121, infoSecurePort=0, ipcPort=44513, storageInfo=lv=-57;cid=testClusterID;nsid=975546529;c=1730982613764), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:30:27,709 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xffe074a96318cac9 with lease ID 0x84e561ebb1dcf7cd: from storage DS-f9472b55-b87e-4e87-bb60-cfb8b6c4da2f node DatanodeRegistration(127.0.0.1:42711, datanodeUuid=2f76db2f-1739-4f70-930b-40cb8a44a362, infoPort=42121, infoSecurePort=0, ipcPort=44513, storageInfo=lv=-57;cid=testClusterID;nsid=975546529;c=1730982613764), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:30:27,765 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@22dbe5ea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/java.io.tmpdir/jetty-localhost-33907-hadoop-hdfs-3_4_1-tests_jar-_-any-15261476965332492681/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:30:27,765 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@343c572e{HTTP/1.1, (http/1.1)}{localhost:33907} 2024-11-07T12:30:27,765 INFO [Time-limited test {}] server.Server(415): Started @166437ms 2024-11-07T12:30:27,766 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-07T12:30:27,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:27,852 WARN [Thread-1373 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:30:27,854 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaa1758e98aeba13f with lease ID 0x84e561ebb1dcf7ce: from storage DS-f38ef4b4-466d-4084-84a0-2b69237f20d6 node DatanodeRegistration(127.0.0.1:36717, datanodeUuid=0e35ec97-7186-4c94-b36a-44916e277331, infoPort=43509, infoSecurePort=0, ipcPort=41665, storageInfo=lv=-57;cid=testClusterID;nsid=975546529;c=1730982613764), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:30:27,854 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaa1758e98aeba13f with lease ID 0x84e561ebb1dcf7ce: from storage DS-96c5feb9-c631-464d-ba92-6f0a3b3c2963 node DatanodeRegistration(127.0.0.1:36717, datanodeUuid=0e35ec97-7186-4c94-b36a-44916e277331, infoPort=43509, infoSecurePort=0, ipcPort=41665, storageInfo=lv=-57;cid=testClusterID;nsid=975546529;c=1730982613764), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:30:28,018 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:30:28,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:28,784 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-07T12:30:28,786 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-07T12:30:28,787 ERROR [FSHLog-0-hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699-prefix:87fb68174c52,33225,1730982614484 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41465,DS-f38ef4b4-466d-4084-84a0-2b69237f20d6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:30:28,788 WARN [FSHLog-0-hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699-prefix:87fb68174c52,33225,1730982614484 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41465,DS-f38ef4b4-466d-4084-84a0-2b69237f20d6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:28,788 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87fb68174c52%2C33225%2C1730982614484:(num 1730982614866) roll requested 2024-11-07T12:30:28,788 INFO [regionserver/87fb68174c52:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C33225%2C1730982614484.1730982628788 2024-11-07T12:30:28,794 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982614866 newFile=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982628788 2024-11-07T12:30:28,794 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:28,794 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:28,794 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:28,794 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:28,794 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:28,794 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982614866 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982628788 2024-11-07T12:30:28,795 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41465,DS-f38ef4b4-466d-4084-84a0-2b69237f20d6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:30:28,795 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41465,DS-f38ef4b4-466d-4084-84a0-2b69237f20d6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:28,795 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982614866 2024-11-07T12:30:28,795 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43509:43509),(127.0.0.1/127.0.0.1:42121:42121)] 2024-11-07T12:30:28,795 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982614866 is not closed yet, will try archiving it next time 2024-11-07T12:30:28,795 WARN [IPC Server handler 3 on default port 38891 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982614866 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-07T12:30:28,796 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982614866 after 1ms 2024-11-07T12:30:28,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:29,019 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:30:29,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:29,709 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-07T12:30:29,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:30,019 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:30,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:30,798 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-07T12:30:30,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:31,020 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:31,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:31,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:32,020 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:32,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:32,796 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982614866 after 4001ms 2024-11-07T12:30:32,801 WARN [ResponseProcessor for block BP-1179130194-172.17.0.3-1730982613764:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1179130194-172.17.0.3-1730982613764:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:32,802 WARN [DataStreamer for file /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982628788 block BP-1179130194-172.17.0.3-1730982613764:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1179130194-172.17.0.3-1730982613764:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36717,DS-f38ef4b4-466d-4084-84a0-2b69237f20d6,DISK], DatanodeInfoWithStorage[127.0.0.1:42711,DS-436dedf8-cbd1-4a37-8b67-0255db24e0fe,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36717,DS-f38ef4b4-466d-4084-84a0-2b69237f20d6,DISK]) is bad. 2024-11-07T12:30:32,802 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1164838792_22 at /127.0.0.1:40636 [Receiving block BP-1179130194-172.17.0.3-1730982613764:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:42711:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40636 dst: /127.0.0.1:42711 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:30:32,802 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1164838792_22 at /127.0.0.1:35848 [Receiving block BP-1179130194-172.17.0.3-1730982613764:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:36717:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35848 dst: /127.0.0.1:36717 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:30:32,803 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@22dbe5ea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:30:32,804 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@343c572e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:30:32,804 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:30:32,804 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62f6e774{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:30:32,804 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@673d3bba{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/hadoop.log.dir/,STOPPED} 2024-11-07T12:30:32,805 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-07T12:30:32,805 WARN [BP-1179130194-172.17.0.3-1730982613764 heartbeating to localhost/127.0.0.1:38891 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:30:32,805 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:30:32,805 WARN [BP-1179130194-172.17.0.3-1730982613764 heartbeating to localhost/127.0.0.1:38891 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1179130194-172.17.0.3-1730982613764 (Datanode Uuid 0e35ec97-7186-4c94-b36a-44916e277331) service to localhost/127.0.0.1:38891 2024-11-07T12:30:32,806 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/cluster_901cd26c-7b42-b2c2-e295-f8c68eab58b1/data/data1/current/BP-1179130194-172.17.0.3-1730982613764 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:30:32,806 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/cluster_901cd26c-7b42-b2c2-e295-f8c68eab58b1/data/data2/current/BP-1179130194-172.17.0.3-1730982613764 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:30:32,806 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:30:32,814 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:30:32,818 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:30:32,819 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:30:32,819 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:30:32,819 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:30:32,819 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45604664{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:30:32,820 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32c717fb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:30:32,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:30:32,935 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@78dcc37b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/java.io.tmpdir/jetty-localhost-35657-hadoop-hdfs-3_4_1-tests_jar-_-any-953493952309040626/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:30:32,936 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a382d25{HTTP/1.1, (http/1.1)}{localhost:35657} 2024-11-07T12:30:32,936 INFO [Time-limited test {}] server.Server(415): Started @171608ms 2024-11-07T12:30:32,937 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:30:32,955 WARN [ResponseProcessor for block BP-1179130194-172.17.0.3-1730982613764:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1179130194-172.17.0.3-1730982613764:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:32,955 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1164838792_22 at /127.0.0.1:40664 [Receiving block BP-1179130194-172.17.0.3-1730982613764:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:42711:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40664 dst: /127.0.0.1:42711 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:30:32,958 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4ad0086e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:30:32,959 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2b5f8885{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:30:32,959 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:30:32,959 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@431e378c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:30:32,959 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@235b3635{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/hadoop.log.dir/,STOPPED} 2024-11-07T12:30:32,960 WARN [BP-1179130194-172.17.0.3-1730982613764 heartbeating to localhost/127.0.0.1:38891 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:30:32,960 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-07T12:30:32,960 WARN [BP-1179130194-172.17.0.3-1730982613764 heartbeating to localhost/127.0.0.1:38891 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1179130194-172.17.0.3-1730982613764 (Datanode Uuid 2f76db2f-1739-4f70-930b-40cb8a44a362) service to localhost/127.0.0.1:38891 2024-11-07T12:30:32,960 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:30:32,961 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/cluster_901cd26c-7b42-b2c2-e295-f8c68eab58b1/data/data3/current/BP-1179130194-172.17.0.3-1730982613764 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:30:32,961 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/cluster_901cd26c-7b42-b2c2-e295-f8c68eab58b1/data/data4/current/BP-1179130194-172.17.0.3-1730982613764 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:30:32,962 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:30:32,970 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:30:32,973 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:30:32,974 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:30:32,974 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:30:32,974 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:30:32,974 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ff0f915{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:30:32,975 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ebbad67{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:30:33,021 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:33,026 WARN [Thread-1416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:30:33,029 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbc738634d0c0291a with lease ID 0x84e561ebb1dcf7cf: from storage DS-f38ef4b4-466d-4084-84a0-2b69237f20d6 node DatanodeRegistration(127.0.0.1:45747, datanodeUuid=0e35ec97-7186-4c94-b36a-44916e277331, infoPort=39161, infoSecurePort=0, ipcPort=34557, storageInfo=lv=-57;cid=testClusterID;nsid=975546529;c=1730982613764), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:30:33,029 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbc738634d0c0291a with lease ID 0x84e561ebb1dcf7cf: from storage DS-96c5feb9-c631-464d-ba92-6f0a3b3c2963 node DatanodeRegistration(127.0.0.1:45747, datanodeUuid=0e35ec97-7186-4c94-b36a-44916e277331, infoPort=39161, infoSecurePort=0, ipcPort=34557, storageInfo=lv=-57;cid=testClusterID;nsid=975546529;c=1730982613764), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:30:33,094 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@468f57bd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/java.io.tmpdir/jetty-localhost-33433-hadoop-hdfs-3_4_1-tests_jar-_-any-16886634014288487607/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:30:33,095 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5091fc79{HTTP/1.1, (http/1.1)}{localhost:33433} 2024-11-07T12:30:33,095 INFO [Time-limited test {}] server.Server(415): Started @171767ms 2024-11-07T12:30:33,097 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:30:33,175 WARN [Thread-1447 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:30:33,177 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6665881b85162878 with lease ID 0x84e561ebb1dcf7d0: from storage DS-436dedf8-cbd1-4a37-8b67-0255db24e0fe node DatanodeRegistration(127.0.0.1:37561, datanodeUuid=2f76db2f-1739-4f70-930b-40cb8a44a362, infoPort=34599, infoSecurePort=0, ipcPort=33807, storageInfo=lv=-57;cid=testClusterID;nsid=975546529;c=1730982613764), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:30:33,178 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6665881b85162878 with lease ID 0x84e561ebb1dcf7d0: from storage DS-f9472b55-b87e-4e87-bb60-cfb8b6c4da2f node DatanodeRegistration(127.0.0.1:37561, datanodeUuid=2f76db2f-1739-4f70-930b-40cb8a44a362, infoPort=34599, infoSecurePort=0, ipcPort=33807, storageInfo=lv=-57;cid=testClusterID;nsid=975546529;c=1730982613764), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:30:33,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:30:33,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:34,022 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:34,114 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-07T12:30:34,116 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-07T12:30:34,117 ERROR [FSHLog-0-hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699-prefix:87fb68174c52,33225,1730982614484 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42711,DS-436dedf8-cbd1-4a37-8b67-0255db24e0fe,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:34,117 WARN [FSHLog-0-hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699-prefix:87fb68174c52,33225,1730982614484 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42711,DS-436dedf8-cbd1-4a37-8b67-0255db24e0fe,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:30:34,118 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87fb68174c52%2C33225%2C1730982614484:(num 1730982628788) roll requested 2024-11-07T12:30:34,118 INFO [regionserver/87fb68174c52:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C33225%2C1730982614484.1730982634118 2024-11-07T12:30:34,123 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982628788 newFile=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982634118 2024-11-07T12:30:34,123 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:34,124 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:34,124 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:34,124 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:34,124 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:34,124 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982628788 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982634118 2024-11-07T12:30:34,124 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42711,DS-436dedf8-cbd1-4a37-8b67-0255db24e0fe,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:34,124 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42711,DS-436dedf8-cbd1-4a37-8b67-0255db24e0fe,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:30:34,124 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982628788 2024-11-07T12:30:34,125 WARN [IPC Server handler 1 on default port 38891 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982628788 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-07T12:30:34,125 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982628788 after 1ms 2024-11-07T12:30:34,128 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39161:39161),(127.0.0.1/127.0.0.1:34599:34599)] 2024-11-07T12:30:34,128 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982628788 is not closed yet, will try archiving it next time 2024-11-07T12:30:34,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:34,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:35,022 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:35,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:35,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:36,023 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:36,130 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C33225%2C1730982614484.1730982636129 2024-11-07T12:30:36,135 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982634118 newFile=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982636129 2024-11-07T12:30:36,135 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:36,135 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:36,136 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:36,136 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:36,136 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:36,136 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982634118 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982636129 2024-11-07T12:30:36,137 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34599:34599),(127.0.0.1/127.0.0.1:39161:39161)] 2024-11-07T12:30:36,137 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): 
hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982628788 is not closed yet, will try archiving it next time 2024-11-07T12:30:36,137 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982634118 is not closed yet, will try archiving it next time 2024-11-07T12:30:36,137 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982614866 2024-11-07T12:30:36,137 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982614866 2024-11-07T12:30:36,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37561 is added to blk_1073741838_1019 (size=1264) 2024-11-07T12:30:36,138 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982614866 after 1ms 2024-11-07T12:30:36,138 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982614866 2024-11-07T12:30:36,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45747 is added to blk_1073741838_1019 (size=1264) 2024-11-07T12:30:36,138 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982628788 is not closed yet, will try archiving it next time 2024-11-07T12:30:36,146 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1730982615769/Put/vlen=218/seqid=0] 2024-11-07T12:30:36,147 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1730982625445/Put/vlen=1045/seqid=0] 2024-11-07T12:30:36,147 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982614866 2024-11-07T12:30:36,147 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982628788 2024-11-07T12:30:36,147 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982628788 2024-11-07T12:30:36,147 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on 
file=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982628788 after 0ms 2024-11-07T12:30:36,147 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982628788 2024-11-07T12:30:36,150 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1730982628787/Put/vlen=1045/seqid=0] 2024-11-07T12:30:36,150 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1730982630799/Put/vlen=1045/seqid=0] 2024-11-07T12:30:36,150 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982628788 2024-11-07T12:30:36,150 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982634118 2024-11-07T12:30:36,150 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982634118 2024-11-07T12:30:36,151 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982634118 after 1ms 2024-11-07T12:30:36,151 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982634118 2024-11-07T12:30:36,154 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1730982634117/Put/vlen=1045/seqid=0] 2024-11-07T12:30:36,154 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982636129 2024-11-07T12:30:36,154 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982636129 2024-11-07T12:30:36,154 WARN [IPC Server handler 4 on default port 38891 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982636129 has not been closed. Lease recovery is in progress. 
RecoveryId = 1022 for block blk_1073741839_1021 2024-11-07T12:30:36,154 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982636129 after 0ms 2024-11-07T12:30:36,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:36,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:37,023 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:37,032 WARN [ResponseProcessor for block BP-1179130194-172.17.0.3-1730982613764:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1179130194-172.17.0.3-1730982613764:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:37,032 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1500621439_22 at /127.0.0.1:57864 [Receiving block BP-1179130194-172.17.0.3-1730982613764:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:37561:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57864 dst: /127.0.0.1:37561 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:37561 remote=/127.0.0.1:57864]. Total timeout mills is 60000, 59103 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:30:37,032 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1500621439_22 at /127.0.0.1:44768 [Receiving block BP-1179130194-172.17.0.3-1730982613764:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:45747:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44768 dst: /127.0.0.1:45747 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:30:37,032 WARN [DataStreamer for file /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982636129 block BP-1179130194-172.17.0.3-1730982613764:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1179130194-172.17.0.3-1730982613764:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37561,DS-436dedf8-cbd1-4a37-8b67-0255db24e0fe,DISK], DatanodeInfoWithStorage[127.0.0.1:45747,DS-f38ef4b4-466d-4084-84a0-2b69237f20d6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37561,DS-436dedf8-cbd1-4a37-8b67-0255db24e0fe,DISK]) is bad. 
2024-11-07T12:30:37,033 WARN [DataStreamer for file /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982636129 block BP-1179130194-172.17.0.3-1730982613764:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1179130194-172.17.0.3-1730982613764:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:30:37,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37561 is added to blk_1073741839_1022 (size=85) 2024-11-07T12:30:37,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:37,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:38,024 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:38,029 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-07T12:30:38,126 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982628788 after 4002ms 2024-11-07T12:30:38,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:38,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:30:39,024 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:39,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:39,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:30:40,025 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:40,155 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982636129 after 4001ms 2024-11-07T12:30:40,155 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982636129 2024-11-07T12:30:40,159 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982636129 2024-11-07T12:30:40,159 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-07T12:30:40,159 ERROR [FSHLog-0-hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699-prefix:87fb68174c52,33225,1730982614484.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41465,DS-f38ef4b4-466d-4084-84a0-2b69237f20d6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:40,160 WARN [FSHLog-0-hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699-prefix:87fb68174c52,33225,1730982614484.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41465,DS-f38ef4b4-466d-4084-84a0-2b69237f20d6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:40,160 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87fb68174c52%2C33225%2C1730982614484.meta:.meta(num 1730982615261) roll requested 2024-11-07T12:30:40,160 INFO [regionserver/87fb68174c52:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C33225%2C1730982614484.meta.1730982640160.meta 2024-11-07T12:30:40,165 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:40,165 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:40,165 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:40,165 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:40,165 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:40,165 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.meta.1730982615261.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.meta.1730982640160.meta 2024-11-07T12:30:40,166 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41465,DS-f38ef4b4-466d-4084-84a0-2b69237f20d6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:40,166 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41465,DS-f38ef4b4-466d-4084-84a0-2b69237f20d6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:40,166 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.meta.1730982615261.meta 2024-11-07T12:30:40,166 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39161:39161),(127.0.0.1/127.0.0.1:34599:34599)] 2024-11-07T12:30:40,166 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.meta.1730982615261.meta is not closed yet, will try archiving it next time 2024-11-07T12:30:40,166 WARN [IPC Server handler 1 on default port 38891 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.meta.1730982615261.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1024 for block blk_1073741834_1013 2024-11-07T12:30:40,166 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.meta.1730982615261.meta after 0ms 2024-11-07T12:30:40,183 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/hbase/meta/1588230740/.tmp/info/94705aed5e18406789766600eba3af1e is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1730982615416.7733a6da8319cf9c842f4bb43e32090e./info:regioninfo/1730982615773/Put/seqid=0 2024-11-07T12:30:40,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37561 is added to blk_1073741841_1025 (size=7125) 2024-11-07T12:30:40,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45747 is added to blk_1073741841_1025 (size=7125) 2024-11-07T12:30:40,188 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/hbase/meta/1588230740/.tmp/info/94705aed5e18406789766600eba3af1e 2024-11-07T12:30:40,207 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/hbase/meta/1588230740/.tmp/ns/a717ce5a711841848602df7899336ed8 is 43, key is default/ns:d/1730982615301/Put/seqid=0 2024-11-07T12:30:40,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45747 is added to blk_1073741842_1026 (size=5153) 2024-11-07T12:30:40,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37561 is added to blk_1073741842_1026 (size=5153) 2024-11-07T12:30:40,212 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/hbase/meta/1588230740/.tmp/ns/a717ce5a711841848602df7899336ed8 2024-11-07T12:30:40,231 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/hbase/meta/1588230740/.tmp/table/1fb371a58e1741148848e62557f7d89a is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1730982615783/Put/seqid=0 2024-11-07T12:30:40,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45747 is added to blk_1073741843_1027 (size=5438) 2024-11-07T12:30:40,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37561 is added to blk_1073741843_1027 (size=5438) 2024-11-07T12:30:40,235 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/hbase/meta/1588230740/.tmp/table/1fb371a58e1741148848e62557f7d89a 2024-11-07T12:30:40,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation 
for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:30:40,240 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/hbase/meta/1588230740/.tmp/info/94705aed5e18406789766600eba3af1e as hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/hbase/meta/1588230740/info/94705aed5e18406789766600eba3af1e 2024-11-07T12:30:40,245 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/hbase/meta/1588230740/info/94705aed5e18406789766600eba3af1e, entries=10, sequenceid=11, filesize=7.0 K 2024-11-07T12:30:40,246 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/hbase/meta/1588230740/.tmp/ns/a717ce5a711841848602df7899336ed8 as hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/hbase/meta/1588230740/ns/a717ce5a711841848602df7899336ed8 2024-11-07T12:30:40,251 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/hbase/meta/1588230740/ns/a717ce5a711841848602df7899336ed8, entries=2, sequenceid=11, filesize=5.0 K 2024-11-07T12:30:40,251 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/hbase/meta/1588230740/.tmp/table/1fb371a58e1741148848e62557f7d89a as hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/hbase/meta/1588230740/table/1fb371a58e1741148848e62557f7d89a 2024-11-07T12:30:40,256 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/hbase/meta/1588230740/table/1fb371a58e1741148848e62557f7d89a, entries=2, sequenceid=11, filesize=5.3 K 2024-11-07T12:30:40,257 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 98ms, sequenceid=11, compaction requested=false 2024-11-07T12:30:40,257 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-07T12:30:40,257 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 7733a6da8319cf9c842f4bb43e32090e 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-07T12:30:40,257 ERROR [FSHLog-0-hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699-prefix:87fb68174c52,33225,1730982614484 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1179130194-172.17.0.3-1730982613764:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:30:40,258 WARN [FSHLog-0-hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699-prefix:87fb68174c52,33225,1730982614484 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1179130194-172.17.0.3-1730982613764:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:30:40,258 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87fb68174c52%2C33225%2C1730982614484:(num 1730982636129) roll requested 2024-11-07T12:30:40,258 INFO [regionserver/87fb68174c52:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C33225%2C1730982614484.1730982640258 2024-11-07T12:30:40,263 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982636129 newFile=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982640258 2024-11-07T12:30:40,263 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:40,263 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:40,263 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:40,263 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:40,263 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:40,263 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982636129 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982640258 2024-11-07T12:30:40,263 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1179130194-172.17.0.3-1730982613764:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:40,264 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34599:34599),(127.0.0.1/127.0.0.1:39161:39161)] 2024-11-07T12:30:40,264 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1179130194-172.17.0.3-1730982613764:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:40,264 DEBUG [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982636129 is not closed yet, will try archiving it next time 2024-11-07T12:30:40,264 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982636129 2024-11-07T12:30:40,264 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982636129 after 0ms 2024-11-07T12:30:40,265 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.1730982636129 to hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/oldWALs/87fb68174c52%2C33225%2C1730982614484.1730982636129 2024-11-07T12:30:40,279 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/default/TestLogRolling-testLogRollOnPipelineRestart/7733a6da8319cf9c842f4bb43e32090e/.tmp/info/deb4e178c97746efbf89fa9a8db78668 is 1080, key is row1002/info:/1730982625445/Put/seqid=0 2024-11-07T12:30:40,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45747 is added to blk_1073741845_1029 (size=9270) 2024-11-07T12:30:40,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37561 is added to blk_1073741845_1029 (size=9270) 2024-11-07T12:30:40,284 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), 
to=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/default/TestLogRolling-testLogRollOnPipelineRestart/7733a6da8319cf9c842f4bb43e32090e/.tmp/info/deb4e178c97746efbf89fa9a8db78668 2024-11-07T12:30:40,290 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/default/TestLogRolling-testLogRollOnPipelineRestart/7733a6da8319cf9c842f4bb43e32090e/.tmp/info/deb4e178c97746efbf89fa9a8db78668 as hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/default/TestLogRolling-testLogRollOnPipelineRestart/7733a6da8319cf9c842f4bb43e32090e/info/deb4e178c97746efbf89fa9a8db78668 2024-11-07T12:30:40,295 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/default/TestLogRolling-testLogRollOnPipelineRestart/7733a6da8319cf9c842f4bb43e32090e/info/deb4e178c97746efbf89fa9a8db78668, entries=4, sequenceid=8, filesize=9.1 K 2024-11-07T12:30:40,296 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 7733a6da8319cf9c842f4bb43e32090e in 38ms, sequenceid=8, compaction requested=false 2024-11-07T12:30:40,296 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 7733a6da8319cf9c842f4bb43e32090e: 2024-11-07T12:30:40,301 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-07T12:30:40,301 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-07T12:30:40,301 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) 
at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:30:40,301 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:30:40,301 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:30:40,301 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-07T12:30:40,301 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-07T12:30:40,301 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1746566449, stopped=false 2024-11-07T12:30:40,301 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=87fb68174c52,36547,1730982614435 2024-11-07T12:30:40,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:30:40,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33225-0x1001a38a0330001, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:30:40,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33225-0x1001a38a0330001, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:40,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:40,303 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:30:40,303 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-07T12:30:40,303 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:30:40,304 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:30:40,304 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:30:40,304 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '87fb68174c52,33225,1730982614484' ***** 2024-11-07T12:30:40,304 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-07T12:30:40,304 INFO [RS:0;87fb68174c52:33225 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-07T12:30:40,304 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33225-0x1001a38a0330001, quorum=127.0.0.1:61811, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:30:40,304 INFO [RS:0;87fb68174c52:33225 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-07T12:30:40,304 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-07T12:30:40,304 INFO [RS:0;87fb68174c52:33225 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-07T12:30:40,304 INFO [RS:0;87fb68174c52:33225 {}] regionserver.HRegionServer(3091): Received CLOSE for 7733a6da8319cf9c842f4bb43e32090e 2024-11-07T12:30:40,305 INFO [RS:0;87fb68174c52:33225 {}] regionserver.HRegionServer(959): stopping server 87fb68174c52,33225,1730982614484 2024-11-07T12:30:40,305 INFO [RS:0;87fb68174c52:33225 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:30:40,305 INFO [RS:0;87fb68174c52:33225 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;87fb68174c52:33225. 2024-11-07T12:30:40,305 DEBUG [RS:0;87fb68174c52:33225 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:30:40,305 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 7733a6da8319cf9c842f4bb43e32090e, disabling compactions & flushes 2024-11-07T12:30:40,305 DEBUG [RS:0;87fb68174c52:33225 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:30:40,305 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1730982615416.7733a6da8319cf9c842f4bb43e32090e. 2024-11-07T12:30:40,305 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1730982615416.7733a6da8319cf9c842f4bb43e32090e. 2024-11-07T12:30:40,305 INFO [RS:0;87fb68174c52:33225 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-07T12:30:40,305 INFO [RS:0;87fb68174c52:33225 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-07T12:30:40,305 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1730982615416.7733a6da8319cf9c842f4bb43e32090e. after waiting 0 ms 2024-11-07T12:30:40,305 INFO [RS:0;87fb68174c52:33225 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-07T12:30:40,305 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1730982615416.7733a6da8319cf9c842f4bb43e32090e. 2024-11-07T12:30:40,305 INFO [RS:0;87fb68174c52:33225 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-07T12:30:40,305 INFO [RS:0;87fb68174c52:33225 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-07T12:30:40,305 DEBUG [RS:0;87fb68174c52:33225 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 7733a6da8319cf9c842f4bb43e32090e=TestLogRolling-testLogRollOnPipelineRestart,,1730982615416.7733a6da8319cf9c842f4bb43e32090e.} 2024-11-07T12:30:40,306 DEBUG [RS:0;87fb68174c52:33225 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 7733a6da8319cf9c842f4bb43e32090e 2024-11-07T12:30:40,306 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:30:40,306 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:30:40,306 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:30:40,306 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:30:40,306 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:30:40,310 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/default/TestLogRolling-testLogRollOnPipelineRestart/7733a6da8319cf9c842f4bb43e32090e/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-07T12:30:40,310 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed 
TestLogRolling-testLogRollOnPipelineRestart,,1730982615416.7733a6da8319cf9c842f4bb43e32090e. 2024-11-07T12:30:40,310 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-07T12:30:40,310 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 7733a6da8319cf9c842f4bb43e32090e: Waiting for close lock at 1730982640305Running coprocessor pre-close hooks at 1730982640305Disabling compacts and flushes for region at 1730982640305Disabling writes for close at 1730982640305Writing region close event to WAL at 1730982640306 (+1 ms)Running coprocessor post-close hooks at 1730982640310 (+4 ms)Closed at 1730982640310 2024-11-07T12:30:40,311 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1730982615416.7733a6da8319cf9c842f4bb43e32090e. 2024-11-07T12:30:40,311 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T12:30:40,311 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:30:40,311 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730982640306Running coprocessor pre-close hooks at 1730982640306Disabling compacts and flushes for region at 1730982640306Disabling writes for close at 1730982640306Writing region close event to WAL at 1730982640307 (+1 ms)Running coprocessor post-close hooks at 1730982640311 (+4 ms)Closed at 1730982640311 2024-11-07T12:30:40,311 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-07T12:30:40,506 INFO [RS:0;87fb68174c52:33225 {}] regionserver.HRegionServer(976): stopping server 87fb68174c52,33225,1730982614484; all regions closed. 
2024-11-07T12:30:40,506 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:40,506 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:40,507 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:40,507 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:40,507 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:40,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37561 is added to blk_1073741840_1023 (size=825) 2024-11-07T12:30:40,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45747 is added to blk_1073741840_1023 (size=825) 2024-11-07T12:30:40,733 INFO [regionserver/87fb68174c52:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:30:40,816 INFO [regionserver/87fb68174c52:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-07T12:30:40,816 INFO [regionserver/87fb68174c52:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-07T12:30:40,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:30:41,025 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:41,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:41,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:30:42,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:42,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:42,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:30:43,027 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:43,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:43,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:30:44,027 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:30:44,167 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.meta.1730982615261.meta after 4001ms 2024-11-07T12:30:44,168 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/WALs/87fb68174c52,33225,1730982614484/87fb68174c52%2C33225%2C1730982614484.meta.1730982615261.meta to hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/oldWALs/87fb68174c52%2C33225%2C1730982614484.meta.1730982615261.meta 2024-11-07T12:30:44,170 DEBUG [RS:0;87fb68174c52:33225 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/oldWALs 2024-11-07T12:30:44,170 INFO [RS:0;87fb68174c52:33225 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87fb68174c52%2C33225%2C1730982614484.meta:.meta(num 1730982640160) 2024-11-07T12:30:44,171 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:44,171 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:44,171 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:44,171 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:44,171 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:44,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45747 is added to blk_1073741844_1028 (size=1162) 2024-11-07T12:30:44,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37561 is added to blk_1073741844_1028 (size=1162) 2024-11-07T12:30:44,177 DEBUG [RS:0;87fb68174c52:33225 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/oldWALs 2024-11-07T12:30:44,177 INFO [RS:0;87fb68174c52:33225 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87fb68174c52%2C33225%2C1730982614484:(num 1730982640258) 2024-11-07T12:30:44,177 DEBUG [RS:0;87fb68174c52:33225 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:30:44,177 INFO [RS:0;87fb68174c52:33225 {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:30:44,177 INFO [RS:0;87fb68174c52:33225 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:30:44,177 INFO [RS:0;87fb68174c52:33225 {}] hbase.ChoreService(370): Chore service for: regionserver/87fb68174c52:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-07T12:30:44,177 INFO [RS:0;87fb68174c52:33225 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:30:44,177 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-07T12:30:44,178 INFO [RS:0;87fb68174c52:33225 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33225 2024-11-07T12:30:44,178 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-07T12:30:44,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33225-0x1001a38a0330001, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/87fb68174c52,33225,1730982614484 2024-11-07T12:30:44,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:30:44,180 INFO [RS:0;87fb68174c52:33225 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:30:44,181 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [87fb68174c52,33225,1730982614484] 2024-11-07T12:30:44,182 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/87fb68174c52,33225,1730982614484 already deleted, retry=false 2024-11-07T12:30:44,183 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 87fb68174c52,33225,1730982614484 expired; onlineServers=0 2024-11-07T12:30:44,183 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '87fb68174c52,36547,1730982614435' ***** 2024-11-07T12:30:44,183 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-07T12:30:44,183 INFO [M:0;87fb68174c52:36547 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:30:44,183 INFO [M:0;87fb68174c52:36547 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:30:44,183 DEBUG [M:0;87fb68174c52:36547 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-07T12:30:44,183 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-07T12:30:44,183 DEBUG [M:0;87fb68174c52:36547 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-07T12:30:44,183 DEBUG [master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.large.0-1730982614661 {}] cleaner.HFileCleaner(306): Exit Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.large.0-1730982614661,5,FailOnTimeoutGroup] 2024-11-07T12:30:44,183 DEBUG [master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.small.0-1730982614661 {}] cleaner.HFileCleaner(306): Exit Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.small.0-1730982614661,5,FailOnTimeoutGroup] 2024-11-07T12:30:44,183 INFO [M:0;87fb68174c52:36547 {}] hbase.ChoreService(370): Chore service for: master/87fb68174c52:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-07T12:30:44,183 INFO [M:0;87fb68174c52:36547 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:30:44,183 DEBUG [M:0;87fb68174c52:36547 {}] master.HMaster(1795): Stopping service threads 2024-11-07T12:30:44,183 INFO [M:0;87fb68174c52:36547 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-07T12:30:44,184 INFO [M:0;87fb68174c52:36547 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:30:44,184 INFO [M:0;87fb68174c52:36547 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-07T12:30:44,184 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-07T12:30:44,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-07T12:30:44,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:44,185 DEBUG [M:0;87fb68174c52:36547 {}] zookeeper.ZKUtil(347): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-07T12:30:44,185 WARN [M:0;87fb68174c52:36547 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-07T12:30:44,185 INFO [M:0;87fb68174c52:36547 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/.lastflushedseqids 2024-11-07T12:30:44,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37561 is added to blk_1073741846_1030 (size=139) 2024-11-07T12:30:44,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45747 is added to blk_1073741846_1030 (size=139) 2024-11-07T12:30:44,191 INFO [M:0;87fb68174c52:36547 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-07T12:30:44,191 INFO [M:0;87fb68174c52:36547 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-07T12:30:44,191 DEBUG [M:0;87fb68174c52:36547 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:30:44,191 INFO [M:0;87fb68174c52:36547 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:30:44,191 DEBUG [M:0;87fb68174c52:36547 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:30:44,192 DEBUG [M:0;87fb68174c52:36547 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:30:44,192 DEBUG [M:0;87fb68174c52:36547 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:30:44,192 INFO [M:0;87fb68174c52:36547 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-11-07T12:30:44,192 ERROR [FSHLog-0-hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData-prefix:87fb68174c52,36547,1730982614435 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41465,DS-f38ef4b4-466d-4084-84a0-2b69237f20d6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:44,192 WARN [FSHLog-0-hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData-prefix:87fb68174c52,36547,1730982614435 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41465,DS-f38ef4b4-466d-4084-84a0-2b69237f20d6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:30:44,192 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 87fb68174c52%2C36547%2C1730982614435:(num 1730982614578) roll requested 2024-11-07T12:30:44,193 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C36547%2C1730982614435.1730982644192 2024-11-07T12:30:44,197 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:44,197 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:44,197 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:44,198 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:44,198 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:44,198 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/WALs/87fb68174c52,36547,1730982614435/87fb68174c52%2C36547%2C1730982614435.1730982614578 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/WALs/87fb68174c52,36547,1730982614435/87fb68174c52%2C36547%2C1730982614435.1730982644192 2024-11-07T12:30:44,198 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41465,DS-f38ef4b4-466d-4084-84a0-2b69237f20d6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:44,198 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41465,DS-f38ef4b4-466d-4084-84a0-2b69237f20d6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:30:44,198 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/WALs/87fb68174c52,36547,1730982614435/87fb68174c52%2C36547%2C1730982614435.1730982614578 2024-11-07T12:30:44,199 WARN [IPC Server handler 0 on default port 38891 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/WALs/87fb68174c52,36547,1730982614435/87fb68174c52%2C36547%2C1730982614435.1730982614578 has not been closed. 
Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-07T12:30:44,199 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/WALs/87fb68174c52,36547,1730982614435/87fb68174c52%2C36547%2C1730982614435.1730982614578 after 1ms 2024-11-07T12:30:44,199 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39161:39161),(127.0.0.1/127.0.0.1:34599:34599)] 2024-11-07T12:30:44,199 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/WALs/87fb68174c52,36547,1730982614435/87fb68174c52%2C36547%2C1730982614435.1730982614578 is not closed yet, will try archiving it next time 2024-11-07T12:30:44,214 DEBUG [M:0;87fb68174c52:36547 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8f5883e22024405b8c7a1fccf35f7c4b is 82, key is hbase:meta,,1/info:regioninfo/1730982615286/Put/seqid=0 2024-11-07T12:30:44,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37561 is added to blk_1073741848_1033 (size=5672) 2024-11-07T12:30:44,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45747 is added to blk_1073741848_1033 (size=5672) 2024-11-07T12:30:44,219 INFO [M:0;87fb68174c52:36547 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8f5883e22024405b8c7a1fccf35f7c4b 2024-11-07T12:30:44,239 DEBUG [M:0;87fb68174c52:36547 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f0df5d76be124d6780d936a6fc9e4fb5 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1730982615787/Put/seqid=0 2024-11-07T12:30:44,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:44,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37561 is added to blk_1073741849_1034 (size=6118) 2024-11-07T12:30:44,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45747 is added to blk_1073741849_1034 (size=6118) 2024-11-07T12:30:44,244 INFO [M:0;87fb68174c52:36547 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f0df5d76be124d6780d936a6fc9e4fb5 2024-11-07T12:30:44,262 DEBUG [M:0;87fb68174c52:36547 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d781180bf05543b69f9a6cf58c3a8258 is 69, key is 87fb68174c52,33225,1730982614484/rs:state/1730982614720/Put/seqid=0 2024-11-07T12:30:44,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37561 is added to blk_1073741850_1035 (size=5156) 2024-11-07T12:30:44,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45747 is added to blk_1073741850_1035 (size=5156) 2024-11-07T12:30:44,268 INFO [M:0;87fb68174c52:36547 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d781180bf05543b69f9a6cf58c3a8258 2024-11-07T12:30:44,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33225-0x1001a38a0330001, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:30:44,281 INFO [RS:0;87fb68174c52:33225 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:30:44,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33225-0x1001a38a0330001, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:30:44,281 INFO [RS:0;87fb68174c52:33225 {}] 
regionserver.HRegionServer(1031): Exiting; stopping=87fb68174c52,33225,1730982614484; zookeeper connection closed. 2024-11-07T12:30:44,282 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@52875cd5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@52875cd5 2024-11-07T12:30:44,282 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-07T12:30:44,289 DEBUG [M:0;87fb68174c52:36547 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/99a4f129d8b94794a20108a9c1441ccc is 52, key is load_balancer_on/state:d/1730982615412/Put/seqid=0 2024-11-07T12:30:44,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37561 is added to blk_1073741851_1036 (size=5056) 2024-11-07T12:30:44,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45747 is added to blk_1073741851_1036 (size=5056) 2024-11-07T12:30:44,294 INFO [M:0;87fb68174c52:36547 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/99a4f129d8b94794a20108a9c1441ccc 2024-11-07T12:30:44,300 DEBUG [M:0;87fb68174c52:36547 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8f5883e22024405b8c7a1fccf35f7c4b as hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8f5883e22024405b8c7a1fccf35f7c4b 2024-11-07T12:30:44,304 INFO [M:0;87fb68174c52:36547 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8f5883e22024405b8c7a1fccf35f7c4b, entries=8, sequenceid=56, filesize=5.5 K 2024-11-07T12:30:44,305 DEBUG [M:0;87fb68174c52:36547 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f0df5d76be124d6780d936a6fc9e4fb5 as hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f0df5d76be124d6780d936a6fc9e4fb5 2024-11-07T12:30:44,310 INFO [M:0;87fb68174c52:36547 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f0df5d76be124d6780d936a6fc9e4fb5, entries=6, sequenceid=56, filesize=6.0 K 2024-11-07T12:30:44,311 DEBUG [M:0;87fb68174c52:36547 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d781180bf05543b69f9a6cf58c3a8258 as 
hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d781180bf05543b69f9a6cf58c3a8258 2024-11-07T12:30:44,316 INFO [M:0;87fb68174c52:36547 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d781180bf05543b69f9a6cf58c3a8258, entries=1, sequenceid=56, filesize=5.0 K 2024-11-07T12:30:44,316 DEBUG [M:0;87fb68174c52:36547 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/99a4f129d8b94794a20108a9c1441ccc as hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/99a4f129d8b94794a20108a9c1441ccc 2024-11-07T12:30:44,321 INFO [M:0;87fb68174c52:36547 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/99a4f129d8b94794a20108a9c1441ccc, entries=1, sequenceid=56, filesize=4.9 K 2024-11-07T12:30:44,322 INFO [M:0;87fb68174c52:36547 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 130ms, sequenceid=56, compaction requested=false 2024-11-07T12:30:44,323 INFO [M:0;87fb68174c52:36547 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:30:44,323 DEBUG [M:0;87fb68174c52:36547 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730982644191Disabling compacts and flushes for region at 1730982644191Disabling writes for close at 1730982644192 (+1 ms)Obtaining lock to block concurrent updates at 1730982644192Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1730982644192Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1730982644192Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1730982644200 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1730982644200Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1730982644214 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1730982644214Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1730982644224 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1730982644238 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1730982644238Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1730982644248 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1730982644262 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1730982644262Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1730982644273 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1730982644288 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1730982644288Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@78676127: reopening flushed file at 1730982644299 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@783735e5: reopening flushed file at 1730982644305 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6662f7a2: reopening flushed file at 1730982644310 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f563b1d: reopening flushed file at 1730982644316 (+6 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 130ms, sequenceid=56, compaction requested=false at 1730982644322 (+6 ms)Writing region close event to WAL at 1730982644323 (+1 ms)Closed at 1730982644323 2024-11-07T12:30:44,324 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:44,324 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:44,324 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:44,324 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:44,324 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:30:44,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37561 is added to blk_1073741847_1031 (size=757) 2024-11-07T12:30:44,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45747 is added to blk_1073741847_1031 (size=757) 2024-11-07T12:30:44,419 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-07T12:30:44,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:44,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 after 68049ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor192.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:30:45,028 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:30:45,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:45,311 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,311 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,324 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,328 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,328 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,328 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,330 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,334 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,334 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,836 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-07T12:30:45,837 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,837 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,838 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,838 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:45,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,853 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,853 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,853 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,855 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,856 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,856 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:45,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:30:46,028 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:46,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:46,517 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T12:30:46,518 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-07T12:30:46,518 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-07T12:30:46,518 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-07T12:30:46,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:47,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:47,180 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-07T12:30:47,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:47,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:48,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:48,200 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/WALs/87fb68174c52,36547,1730982614435/87fb68174c52%2C36547%2C1730982614435.1730982614578 after 4001ms 2024-11-07T12:30:48,200 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/WALs/87fb68174c52,36547,1730982614435/87fb68174c52%2C36547%2C1730982614435.1730982614578 to hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/oldWALs/87fb68174c52%2C36547%2C1730982614435.1730982614578 2024-11-07T12:30:48,203 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/MasterData/oldWALs/87fb68174c52%2C36547%2C1730982614435.1730982614578 to hdfs://localhost:38891/user/jenkins/test-data/2c4e848e-1180-1b54-0998-271609f54699/oldWALs/87fb68174c52%2C36547%2C1730982614435.1730982614578$masterlocalwal$ 2024-11-07T12:30:48,203 INFO [M:0;87fb68174c52:36547 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-07T12:30:48,203 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
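Editorial note: the repeated RecoverLeaseFSUtils warnings above come from a lease-recovery retry loop that probes DistributedFileSystem.isFileClosed(Path) through reflection; the stack traces show Method.invoke throwing InvocationTargetException whose cause is IOException("Filesystem closed"), i.e. the DFSClient for the hdfs://localhost:39665 cluster has already been shut down during teardown, while the lease on the hdfs://localhost:38891 WAL is eventually recovered. The following is a minimal, hypothetical sketch of such a reflective probe under those assumptions; it is not the HBase implementation, and everything except DistributedFileSystem#isFileClosed is illustrative.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical sketch, not RecoverLeaseFSUtils: probe whether a file is closed on
// filesystems that expose isFileClosed(Path), looking the method up reflectively so
// the caller does not need a hard dependency on DistributedFileSystem.
public final class LeaseProbeSketch {

  static boolean probeFileClosed(FileSystem fs, Path path) throws IOException {
    try {
      // DistributedFileSystem provides isFileClosed(Path); generic FileSystem does not.
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      // Filesystem has no notion of "closed"; caller falls back to plain lease recovery.
      return false;
    } catch (InvocationTargetException e) {
      // The real failure (here: "Filesystem closed" because the client was already shut
      // down) arrives wrapped in InvocationTargetException, exactly as logged above.
      Throwable cause = e.getCause();
      if (cause instanceof IOException) {
        throw (IOException) cause;
      }
      throw new IOException(cause);
    }
  }
}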
2024-11-07T12:30:48,203 INFO [M:0;87fb68174c52:36547 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:36547 2024-11-07T12:30:48,203 INFO [M:0;87fb68174c52:36547 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:30:48,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:30:48,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:30:48,305 INFO [M:0;87fb68174c52:36547 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:30:48,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36547-0x1001a38a0330000, quorum=127.0.0.1:61811, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:30:48,308 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@468f57bd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:30:48,308 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5091fc79{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:30:48,308 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:30:48,308 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ebbad67{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:30:48,308 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ff0f915{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/hadoop.log.dir/,STOPPED} 2024-11-07T12:30:48,310 WARN [BP-1179130194-172.17.0.3-1730982613764 heartbeating to localhost/127.0.0.1:38891 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:30:48,310 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
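Editorial note: the "Received ZooKeeper Event, type=None, state=Closed, path=null" lines above are connection-state notifications rather than znode changes; when the master closes its ZooKeeper session, the event thread delivers a final event with EventType.None and no path. A minimal watcher sketch under that assumption follows; it is not the HBase ZKWatcher, and the handler method names are made up.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;

// Hypothetical sketch: events with EventType.None carry only session/connection state
// (path == null), which is what the ZKWatcher DEBUG lines above record during shutdown.
public class ConnectionAwareWatcher implements Watcher {

  @Override
  public void process(WatchedEvent event) {
    if (event.getType() == Event.EventType.None) {
      // Connection-state transition (SyncConnected, Disconnected, Closed, ...); no znode involved.
      onConnectionStateChange(event.getState());
      return;
    }
    // Otherwise a real znode event: NodeCreated, NodeDeleted, NodeDataChanged, NodeChildrenChanged.
    onNodeEvent(event.getType(), event.getPath());
  }

  protected void onConnectionStateChange(Event.KeeperState state) {
    System.out.println("session state: " + state);
  }

  protected void onNodeEvent(Event.EventType type, String path) {
    System.out.println(type + " on " + path);
  }
}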
2024-11-07T12:30:48,310 WARN [BP-1179130194-172.17.0.3-1730982613764 heartbeating to localhost/127.0.0.1:38891 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1179130194-172.17.0.3-1730982613764 (Datanode Uuid 2f76db2f-1739-4f70-930b-40cb8a44a362) service to localhost/127.0.0.1:38891 2024-11-07T12:30:48,310 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:30:48,310 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/cluster_901cd26c-7b42-b2c2-e295-f8c68eab58b1/data/data4/current/BP-1179130194-172.17.0.3-1730982613764 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:30:48,311 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/cluster_901cd26c-7b42-b2c2-e295-f8c68eab58b1/data/data3/current/BP-1179130194-172.17.0.3-1730982613764 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:30:48,311 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:30:48,313 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@78dcc37b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:30:48,313 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a382d25{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:30:48,313 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:30:48,313 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32c717fb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:30:48,313 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45604664{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/hadoop.log.dir/,STOPPED} 2024-11-07T12:30:48,314 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
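Editorial note: the "Potentially hanging thread" report that follows is the test's resource checker comparing live threads before and after the test (Thread=180, was 155) and dumping the stacks of the survivors. A simplified, hypothetical version of such a check is sketched below using Thread.getAllStackTraces(); it is not the HBase ResourceChecker implementation.

import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

// Hypothetical leak check in the spirit of the report below: snapshot live thread names
// before the test, then after it print any thread that was not present in the snapshot.
public final class ThreadLeakReport {

  // Capture the names of all currently live threads.
  public static Set<String> snapshot() {
    return Thread.getAllStackTraces().keySet().stream()
        .map(Thread::getName)
        .collect(Collectors.toSet());
  }

  // Print every thread that is live now but was absent from the "before" snapshot.
  public static void report(Set<String> before) {
    Map<Thread, StackTraceElement[]> now = Thread.getAllStackTraces();
    System.out.printf("Thread=%d (was %d)%n", now.size(), before.size());
    for (Map.Entry<Thread, StackTraceElement[]> entry : now.entrySet()) {
      if (before.contains(entry.getKey().getName())) {
        continue; // existed before the test; not a leak candidate
      }
      System.out.println("Potentially hanging thread: " + entry.getKey().getName());
      for (StackTraceElement frame : entry.getValue()) {
        System.out.println("    " + frame);
      }
    }
  }
}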
2024-11-07T12:30:48,314 WARN [BP-1179130194-172.17.0.3-1730982613764 heartbeating to localhost/127.0.0.1:38891 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:30:48,314 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:30:48,314 WARN [BP-1179130194-172.17.0.3-1730982613764 heartbeating to localhost/127.0.0.1:38891 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1179130194-172.17.0.3-1730982613764 (Datanode Uuid 0e35ec97-7186-4c94-b36a-44916e277331) service to localhost/127.0.0.1:38891 2024-11-07T12:30:48,315 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/cluster_901cd26c-7b42-b2c2-e295-f8c68eab58b1/data/data1/current/BP-1179130194-172.17.0.3-1730982613764 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:30:48,315 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/cluster_901cd26c-7b42-b2c2-e295-f8c68eab58b1/data/data2/current/BP-1179130194-172.17.0.3-1730982613764 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:30:48,315 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:30:48,320 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@53a4c428{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:30:48,321 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@347a2271{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:30:48,321 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:30:48,321 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@421a8f73{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:30:48,321 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a3a743f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/hadoop.log.dir/,STOPPED} 2024-11-07T12:30:48,327 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-07T12:30:48,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-07T12:30:48,354 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=180 (was 155) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38891 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38891 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38891 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38891 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38891 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:38891 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38891 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38891 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 436) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=53 (was 75), ProcessCount=11 (was 11), AvailableMemoryMB=6411 (was 6577) 2024-11-07T12:30:48,361 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=180, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=53, ProcessCount=11, AvailableMemoryMB=6411 2024-11-07T12:30:48,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-07T12:30:48,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/hadoop.log.dir so I do NOT create it in target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4 2024-11-07T12:30:48,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/069a8d77-df1c-3fda-6c57-1662868477b2/hadoop.tmp.dir so I do NOT create it in target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4 2024-11-07T12:30:48,362 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/cluster_ad3f8a98-eca3-eea8-a4c3-a7c29f7fac93, deleteOnExit=true 2024-11-07T12:30:48,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-07T12:30:48,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/test.cache.data in system properties and HBase conf 2024-11-07T12:30:48,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/hadoop.tmp.dir in system properties and HBase conf 2024-11-07T12:30:48,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/hadoop.log.dir in system properties and HBase conf 2024-11-07T12:30:48,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-07T12:30:48,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-07T12:30:48,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-07T12:30:48,362 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system 
is not a DistributedFileSystem. Skipping on block location reordering 2024-11-07T12:30:48,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:30:48,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:30:48,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-07T12:30:48,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:30:48,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-07T12:30:48,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-07T12:30:48,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:30:48,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:30:48,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-07T12:30:48,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/nfs.dump.dir in system properties and HBase conf 2024-11-07T12:30:48,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/java.io.tmpdir in system properties and HBase conf 2024-11-07T12:30:48,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:30:48,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-07T12:30:48,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-07T12:30:48,376 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:30:48,442 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:30:48,445 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:30:48,446 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:30:48,447 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:30:48,447 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:30:48,447 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:30:48,447 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e11e1e0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:30:48,448 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35dc38db{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:30:48,562 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6df6a7f2{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/java.io.tmpdir/jetty-localhost-44529-hadoop-hdfs-3_4_1-tests_jar-_-any-2525722338592654837/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:30:48,563 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@520bb90c{HTTP/1.1, (http/1.1)}{localhost:44529} 2024-11-07T12:30:48,563 INFO [Time-limited test {}] server.Server(415): Started @187236ms 2024-11-07T12:30:48,576 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:30:48,626 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:30:48,629 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:30:48,630 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:30:48,630 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:30:48,630 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:30:48,630 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1592e8d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:30:48,631 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ac52070{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:30:48,745 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4732430a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/java.io.tmpdir/jetty-localhost-45371-hadoop-hdfs-3_4_1-tests_jar-_-any-8926630987278220381/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:30:48,745 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5206174e{HTTP/1.1, (http/1.1)}{localhost:45371} 2024-11-07T12:30:48,745 INFO [Time-limited test {}] server.Server(415): Started @187418ms 2024-11-07T12:30:48,747 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:30:48,775 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:30:48,778 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:30:48,779 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:30:48,779 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:30:48,779 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:30:48,779 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ff483aa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:30:48,780 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@144fb880{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:30:48,833 WARN [Thread-1641 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/cluster_ad3f8a98-eca3-eea8-a4c3-a7c29f7fac93/data/data1/current/BP-653850386-172.17.0.3-1730982648392/current, will proceed with Du for space computation calculation, 2024-11-07T12:30:48,833 WARN [Thread-1642 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/cluster_ad3f8a98-eca3-eea8-a4c3-a7c29f7fac93/data/data2/current/BP-653850386-172.17.0.3-1730982648392/current, will proceed with Du for space computation calculation, 2024-11-07T12:30:48,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:48,849 WARN [Thread-1620 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-07T12:30:48,852 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x423c70b5d9212470 with lease ID 0x5c666f18547fea6b: Processing first storage report for DS-f2272604-999d-42f4-a12c-bfaec238281e from datanode DatanodeRegistration(127.0.0.1:45739, datanodeUuid=5aa9d217-6f94-40ed-a24d-67fdf05694b1, infoPort=36937, infoSecurePort=0, ipcPort=39671, storageInfo=lv=-57;cid=testClusterID;nsid=520157475;c=1730982648392) 2024-11-07T12:30:48,852 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x423c70b5d9212470 with lease ID 0x5c666f18547fea6b: from storage DS-f2272604-999d-42f4-a12c-bfaec238281e node DatanodeRegistration(127.0.0.1:45739, datanodeUuid=5aa9d217-6f94-40ed-a24d-67fdf05694b1, infoPort=36937, infoSecurePort=0, ipcPort=39671, storageInfo=lv=-57;cid=testClusterID;nsid=520157475;c=1730982648392), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:30:48,852 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x423c70b5d9212470 with lease ID 0x5c666f18547fea6b: Processing first storage report for DS-444b905e-cfe4-4480-badb-a4df0ea51922 from datanode DatanodeRegistration(127.0.0.1:45739, datanodeUuid=5aa9d217-6f94-40ed-a24d-67fdf05694b1, infoPort=36937, infoSecurePort=0, ipcPort=39671, storageInfo=lv=-57;cid=testClusterID;nsid=520157475;c=1730982648392) 2024-11-07T12:30:48,852 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x423c70b5d9212470 with lease ID 0x5c666f18547fea6b: from storage DS-444b905e-cfe4-4480-badb-a4df0ea51922 node DatanodeRegistration(127.0.0.1:45739, datanodeUuid=5aa9d217-6f94-40ed-a24d-67fdf05694b1, infoPort=36937, infoSecurePort=0, ipcPort=39671, storageInfo=lv=-57;cid=testClusterID;nsid=520157475;c=1730982648392), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:30:48,897 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@666a2e8d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/java.io.tmpdir/jetty-localhost-43349-hadoop-hdfs-3_4_1-tests_jar-_-any-13751407133517166673/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:30:48,897 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@3659c62{HTTP/1.1, (http/1.1)}{localhost:43349} 2024-11-07T12:30:48,897 INFO [Time-limited test {}] server.Server(415): Started @187570ms 2024-11-07T12:30:48,898 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:30:48,978 WARN [Thread-1667 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/cluster_ad3f8a98-eca3-eea8-a4c3-a7c29f7fac93/data/data3/current/BP-653850386-172.17.0.3-1730982648392/current, will proceed with Du for space computation calculation, 2024-11-07T12:30:48,978 WARN [Thread-1668 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/cluster_ad3f8a98-eca3-eea8-a4c3-a7c29f7fac93/data/data4/current/BP-653850386-172.17.0.3-1730982648392/current, will proceed with Du for space computation calculation, 2024-11-07T12:30:48,997 WARN [Thread-1656 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-07T12:30:49,000 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5499fccce0961d39 with lease ID 0x5c666f18547fea6c: Processing first storage report for DS-2ac6134b-d6de-4373-8721-100b537ac93d from datanode DatanodeRegistration(127.0.0.1:40411, datanodeUuid=534d355c-5058-4cd9-9eb5-2b9a4c7ee40a, infoPort=45549, infoSecurePort=0, ipcPort=40853, storageInfo=lv=-57;cid=testClusterID;nsid=520157475;c=1730982648392) 2024-11-07T12:30:49,000 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5499fccce0961d39 with lease ID 0x5c666f18547fea6c: from storage DS-2ac6134b-d6de-4373-8721-100b537ac93d node DatanodeRegistration(127.0.0.1:40411, datanodeUuid=534d355c-5058-4cd9-9eb5-2b9a4c7ee40a, infoPort=45549, infoSecurePort=0, ipcPort=40853, storageInfo=lv=-57;cid=testClusterID;nsid=520157475;c=1730982648392), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:30:49,000 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5499fccce0961d39 with lease ID 0x5c666f18547fea6c: Processing first storage report for DS-6086f6a7-4bf1-4bd6-9f34-4b1a1cc6c4bc from datanode DatanodeRegistration(127.0.0.1:40411, datanodeUuid=534d355c-5058-4cd9-9eb5-2b9a4c7ee40a, infoPort=45549, infoSecurePort=0, ipcPort=40853, storageInfo=lv=-57;cid=testClusterID;nsid=520157475;c=1730982648392) 2024-11-07T12:30:49,000 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5499fccce0961d39 with lease ID 0x5c666f18547fea6c: from storage DS-6086f6a7-4bf1-4bd6-9f34-4b1a1cc6c4bc node DatanodeRegistration(127.0.0.1:40411, datanodeUuid=534d355c-5058-4cd9-9eb5-2b9a4c7ee40a, infoPort=45549, infoSecurePort=0, ipcPort=40853, storageInfo=lv=-57;cid=testClusterID;nsid=520157475;c=1730982648392), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:30:49,021 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4 
2024-11-07T12:30:49,024 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/cluster_ad3f8a98-eca3-eea8-a4c3-a7c29f7fac93/zookeeper_0, clientPort=64652, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/cluster_ad3f8a98-eca3-eea8-a4c3-a7c29f7fac93/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/cluster_ad3f8a98-eca3-eea8-a4c3-a7c29f7fac93/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-07T12:30:49,024 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64652 2024-11-07T12:30:49,025 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:30:49,026 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:30:49,030 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:49,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:30:49,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:30:49,036 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb with version=8 2024-11-07T12:30:49,036 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/hbase-staging 2024-11-07T12:30:49,037 INFO [Time-limited test {}] client.ConnectionUtils(128): master/87fb68174c52:0 server-side Connection retries=45 2024-11-07T12:30:49,038 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:30:49,038 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:30:49,038 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:30:49,038 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:30:49,038 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:30:49,038 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-07T12:30:49,038 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:30:49,038 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42317 2024-11-07T12:30:49,040 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42317 connecting to ZooKeeper ensemble=127.0.0.1:64652 2024-11-07T12:30:49,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:423170x0, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:30:49,046 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42317-0x1001a39275d0000 connected 2024-11-07T12:30:49,066 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:30:49,067 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:30:49,069 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:30:49,069 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb, hbase.cluster.distributed=false 2024-11-07T12:30:49,070 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:30:49,071 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42317 2024-11-07T12:30:49,071 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42317 2024-11-07T12:30:49,071 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42317 2024-11-07T12:30:49,075 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42317 2024-11-07T12:30:49,075 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42317 2024-11-07T12:30:49,090 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/87fb68174c52:0 server-side Connection retries=45 2024-11-07T12:30:49,090 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:30:49,090 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:30:49,090 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:30:49,090 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:30:49,090 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:30:49,090 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T12:30:49,090 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:30:49,091 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38255 2024-11-07T12:30:49,092 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38255 connecting to ZooKeeper 
ensemble=127.0.0.1:64652 2024-11-07T12:30:49,093 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:30:49,094 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:30:49,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:382550x0, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:30:49,099 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:382550x0, quorum=127.0.0.1:64652, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:30:49,099 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38255-0x1001a39275d0001 connected 2024-11-07T12:30:49,099 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-07T12:30:49,102 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-07T12:30:49,102 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38255-0x1001a39275d0001, quorum=127.0.0.1:64652, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T12:30:49,103 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38255-0x1001a39275d0001, quorum=127.0.0.1:64652, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:30:49,106 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38255 2024-11-07T12:30:49,106 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38255 2024-11-07T12:30:49,107 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38255 2024-11-07T12:30:49,107 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38255 2024-11-07T12:30:49,108 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38255 2024-11-07T12:30:49,120 DEBUG [M:0;87fb68174c52:42317 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;87fb68174c52:42317 2024-11-07T12:30:49,120 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/87fb68174c52,42317,1730982649037 2024-11-07T12:30:49,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:30:49,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38255-0x1001a39275d0001, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:30:49,122 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): 
master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/87fb68174c52,42317,1730982649037 2024-11-07T12:30:49,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38255-0x1001a39275d0001, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T12:30:49,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:49,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38255-0x1001a39275d0001, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:49,125 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T12:30:49,125 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/87fb68174c52,42317,1730982649037 from backup master directory 2024-11-07T12:30:49,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38255-0x1001a39275d0001, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:30:49,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/87fb68174c52,42317,1730982649037 2024-11-07T12:30:49,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:30:49,127 WARN [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-07T12:30:49,127 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=87fb68174c52,42317,1730982649037 2024-11-07T12:30:49,131 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/hbase.id] with ID: 375433bc-167f-455d-a418-99e521bf2e26 2024-11-07T12:30:49,131 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/.tmp/hbase.id 2024-11-07T12:30:49,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:30:49,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:30:49,137 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/.tmp/hbase.id]:[hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/hbase.id] 2024-11-07T12:30:49,146 INFO [master/87fb68174c52:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:30:49,146 INFO [master/87fb68174c52:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-07T12:30:49,148 INFO [master/87fb68174c52:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-07T12:30:49,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38255-0x1001a39275d0001, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:49,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:49,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:30:49,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:30:49,158 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T12:30:49,159 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-07T12:30:49,159 INFO [master/87fb68174c52:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:30:49,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:30:49,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:30:49,166 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store 2024-11-07T12:30:49,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:30:49,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:30:49,172 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:30:49,172 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:30:49,172 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:30:49,173 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:30:49,173 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:30:49,173 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:30:49,173 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-07T12:30:49,173 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730982649172Disabling compacts and flushes for region at 1730982649172Disabling writes for close at 1730982649173 (+1 ms)Writing region close event to WAL at 1730982649173Closed at 1730982649173 2024-11-07T12:30:49,173 WARN [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store/.initializing 2024-11-07T12:30:49,173 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/WALs/87fb68174c52,42317,1730982649037 2024-11-07T12:30:49,175 INFO [master/87fb68174c52:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87fb68174c52%2C42317%2C1730982649037, suffix=, logDir=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/WALs/87fb68174c52,42317,1730982649037, archiveDir=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/oldWALs, maxLogs=10 2024-11-07T12:30:49,176 INFO [master/87fb68174c52:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C42317%2C1730982649037.1730982649176 2024-11-07T12:30:49,180 INFO [master/87fb68174c52:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/WALs/87fb68174c52,42317,1730982649037/87fb68174c52%2C42317%2C1730982649037.1730982649176 2024-11-07T12:30:49,180 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36937:36937),(127.0.0.1/127.0.0.1:45549:45549)] 2024-11-07T12:30:49,181 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:30:49,181 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:30:49,181 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:30:49,181 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:30:49,182 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:30:49,184 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-07T12:30:49,184 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:49,184 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:30:49,184 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:30:49,185 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-07T12:30:49,185 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:49,186 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:30:49,186 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:30:49,186 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-07T12:30:49,187 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:49,187 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:30:49,187 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:30:49,188 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-07T12:30:49,188 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:49,188 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:30:49,188 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:30:49,189 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:30:49,189 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:30:49,190 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:30:49,190 DEBUG [master/87fb68174c52:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:30:49,191 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-07T12:30:49,192 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:30:49,194 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:30:49,194 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=848797, jitterRate=0.079302117228508}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-07T12:30:49,195 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1730982649181Initializing all the Stores at 1730982649182 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982649182Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982649182Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982649182Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982649182Cleaning up temporary data from old regions at 1730982649190 (+8 ms)Region opened successfully at 1730982649195 (+5 ms) 2024-11-07T12:30:49,195 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-07T12:30:49,197 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a15ff07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87fb68174c52/172.17.0.3:0 2024-11-07T12:30:49,198 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-07T12:30:49,198 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-07T12:30:49,198 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-07T12:30:49,198 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-07T12:30:49,199 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-07T12:30:49,199 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-07T12:30:49,199 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-07T12:30:49,201 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-07T12:30:49,202 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-07T12:30:49,203 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-07T12:30:49,203 INFO [master/87fb68174c52:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-07T12:30:49,204 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-07T12:30:49,205 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-07T12:30:49,206 INFO [master/87fb68174c52:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-07T12:30:49,206 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-07T12:30:49,208 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-07T12:30:49,208 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-07T12:30:49,210 DEBUG 
[master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-07T12:30:49,211 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-07T12:30:49,213 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-07T12:30:49,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38255-0x1001a39275d0001, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:30:49,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:30:49,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:49,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38255-0x1001a39275d0001, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:49,215 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=87fb68174c52,42317,1730982649037, sessionid=0x1001a39275d0000, setting cluster-up flag (Was=false) 2024-11-07T12:30:49,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38255-0x1001a39275d0001, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:49,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:49,224 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-07T12:30:49,225 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87fb68174c52,42317,1730982649037 2024-11-07T12:30:49,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:49,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38255-0x1001a39275d0001, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:49,233 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-07T12:30:49,234 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87fb68174c52,42317,1730982649037 2024-11-07T12:30:49,235 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-07T12:30:49,237 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-07T12:30:49,237 INFO [master/87fb68174c52:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-07T12:30:49,237 INFO [master/87fb68174c52:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-07T12:30:49,237 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 87fb68174c52,42317,1730982649037 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-07T12:30:49,238 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:30:49,239 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:30:49,239 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:30:49,239 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:30:49,239 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/87fb68174c52:0, corePoolSize=10, maxPoolSize=10 2024-11-07T12:30:49,239 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:30:49,239 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/87fb68174c52:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:30:49,239 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/87fb68174c52:0, corePoolSize=1, 
maxPoolSize=1 2024-11-07T12:30:49,240 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1730982679239 2024-11-07T12:30:49,240 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-07T12:30:49,240 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-07T12:30:49,240 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-07T12:30:49,240 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-07T12:30:49,240 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-07T12:30:49,240 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-07T12:30:49,240 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:49,240 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-07T12:30:49,240 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-07T12:30:49,240 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-07T12:30:49,240 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:30:49,241 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-07T12:30:49,241 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-07T12:30:49,241 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-07T12:30:49,241 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.large.0-1730982649241,5,FailOnTimeoutGroup] 2024-11-07T12:30:49,241 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.small.0-1730982649241,5,FailOnTimeoutGroup] 2024-11-07T12:30:49,241 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:49,241 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-07T12:30:49,241 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:49,241 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:49,241 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:49,242 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-07T12:30:49,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:49,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:30:49,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:30:49,250 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-07T12:30:49,250 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb 2024-11-07T12:30:49,256 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741832_1008 (size=32) 2024-11-07T12:30:49,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741832_1008 (size=32) 2024-11-07T12:30:49,257 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:30:49,258 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:30:49,259 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:30:49,259 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:49,259 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:30:49,259 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:30:49,260 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:30:49,260 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:49,261 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:30:49,261 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:30:49,262 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:30:49,262 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:49,262 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:30:49,263 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:30:49,263 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:30:49,264 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:49,264 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:30:49,264 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:30:49,265 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/hbase/meta/1588230740 2024-11-07T12:30:49,265 
DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/hbase/meta/1588230740 2024-11-07T12:30:49,266 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:30:49,266 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:30:49,267 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T12:30:49,268 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:30:49,270 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:30:49,270 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=817213, jitterRate=0.03914014995098114}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:30:49,271 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1730982649257Initializing all the Stores at 1730982649257Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982649257Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982649258 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982649258Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982649258Cleaning up temporary data from old regions at 1730982649266 (+8 ms)Region opened successfully at 1730982649271 (+5 ms) 2024-11-07T12:30:49,271 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:30:49,271 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:30:49,271 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on 
hbase:meta,,1.1588230740 2024-11-07T12:30:49,271 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:30:49,271 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:30:49,271 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:30:49,271 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730982649271Disabling compacts and flushes for region at 1730982649271Disabling writes for close at 1730982649271Writing region close event to WAL at 1730982649271Closed at 1730982649271 2024-11-07T12:30:49,272 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:30:49,273 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-07T12:30:49,273 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-07T12:30:49,274 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:30:49,275 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-07T12:30:49,310 INFO [RS:0;87fb68174c52:38255 {}] regionserver.HRegionServer(746): ClusterId : 375433bc-167f-455d-a418-99e521bf2e26 2024-11-07T12:30:49,310 DEBUG [RS:0;87fb68174c52:38255 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-07T12:30:49,312 DEBUG [RS:0;87fb68174c52:38255 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-07T12:30:49,312 DEBUG [RS:0;87fb68174c52:38255 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-07T12:30:49,314 DEBUG [RS:0;87fb68174c52:38255 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-07T12:30:49,314 DEBUG [RS:0;87fb68174c52:38255 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e1ece11, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87fb68174c52/172.17.0.3:0 2024-11-07T12:30:49,326 DEBUG [RS:0;87fb68174c52:38255 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;87fb68174c52:38255 2024-11-07T12:30:49,326 INFO [RS:0;87fb68174c52:38255 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-07T12:30:49,326 INFO [RS:0;87fb68174c52:38255 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-07T12:30:49,326 DEBUG [RS:0;87fb68174c52:38255 {}] regionserver.HRegionServer(832): About to register with Master. 
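The ZKWatcher lines throughout this startup sequence report one-shot ZooKeeper watches firing on the /hbase base znode (NodeCreated on /hbase/running, repeated NodeChildrenChanged on /hbase). A minimal sketch of the same watch-and-re-arm pattern with the plain Apache ZooKeeper client, assuming the quorum address printed in the log; this illustrates how such events are received, it is not HBase's ZKWatcher implementation:

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class BaseZNodeWatcherSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Quorum address as it appears in the log; the session timeout is an arbitrary choice here.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:64652", 30_000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();
        watchChildren(zk, "/hbase");
        Thread.sleep(Long.MAX_VALUE);
      }

      private static void watchChildren(ZooKeeper zk, String path) throws Exception {
        // getChildren with a watcher registers a one-shot child watch on the znode.
        List<String> children = zk.getChildren(path, (WatchedEvent event) -> {
          System.out.println("Received ZooKeeper Event, type=" + event.getType()
              + ", state=" + event.getState() + ", path=" + event.getPath());
          try {
            watchChildren(zk, path); // one-shot watches must be re-armed after each event
          } catch (Exception e) {
            e.printStackTrace();
          }
        });
        System.out.println("Children of " + path + ": " + children);
      }
    }

Because child watches are one-shot, every change under /hbase produces a fresh event on each registered session, which is why both the master (42317) and regionserver (38255) sessions log the same NodeChildrenChanged notifications above.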
2024-11-07T12:30:49,327 INFO [RS:0;87fb68174c52:38255 {}] regionserver.HRegionServer(2659): reportForDuty to master=87fb68174c52,42317,1730982649037 with port=38255, startcode=1730982649090 2024-11-07T12:30:49,327 DEBUG [RS:0;87fb68174c52:38255 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-07T12:30:49,329 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56527, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-07T12:30:49,330 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42317 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 87fb68174c52,38255,1730982649090 2024-11-07T12:30:49,330 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42317 {}] master.ServerManager(517): Registering regionserver=87fb68174c52,38255,1730982649090 2024-11-07T12:30:49,332 DEBUG [RS:0;87fb68174c52:38255 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb 2024-11-07T12:30:49,332 DEBUG [RS:0;87fb68174c52:38255 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41511 2024-11-07T12:30:49,332 DEBUG [RS:0;87fb68174c52:38255 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-07T12:30:49,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:30:49,335 DEBUG [RS:0;87fb68174c52:38255 {}] zookeeper.ZKUtil(111): regionserver:38255-0x1001a39275d0001, quorum=127.0.0.1:64652, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/87fb68174c52,38255,1730982649090 2024-11-07T12:30:49,335 WARN [RS:0;87fb68174c52:38255 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-07T12:30:49,335 INFO [RS:0;87fb68174c52:38255 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:30:49,335 DEBUG [RS:0;87fb68174c52:38255 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/WALs/87fb68174c52,38255,1730982649090 2024-11-07T12:30:49,336 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [87fb68174c52,38255,1730982649090] 2024-11-07T12:30:49,339 INFO [RS:0;87fb68174c52:38255 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-07T12:30:49,340 INFO [RS:0;87fb68174c52:38255 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-07T12:30:49,341 INFO [RS:0;87fb68174c52:38255 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-07T12:30:49,341 INFO [RS:0;87fb68174c52:38255 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
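The reportForDuty / "Registering regionserver=87fb68174c52,38255,1730982649090" exchange above is what later makes this server visible to clients as a live region server. A minimal sketch, assuming a reachable cluster at the quorum printed in the log and the standard Admin API (none of this code appears in the test output), that lists the registered servers:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListRegionServersSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Quorum host and client port as printed in the log (quorum=127.0.0.1:64652).
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "64652");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          System.out.println("Active master: " + metrics.getMasterName());
          for (ServerName server : metrics.getLiveServerMetrics().keySet()) {
            // Expected to print entries of the form host,port,startcode,
            // e.g. 87fb68174c52,38255,1730982649090 once registration completes.
            System.out.println("Live region server: " + server);
          }
        }
      }
    }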
2024-11-07T12:30:49,341 INFO [RS:0;87fb68174c52:38255 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-07T12:30:49,342 INFO [RS:0;87fb68174c52:38255 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-07T12:30:49,342 INFO [RS:0;87fb68174c52:38255 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:49,342 DEBUG [RS:0;87fb68174c52:38255 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:30:49,342 DEBUG [RS:0;87fb68174c52:38255 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:30:49,342 DEBUG [RS:0;87fb68174c52:38255 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:30:49,342 DEBUG [RS:0;87fb68174c52:38255 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:30:49,342 DEBUG [RS:0;87fb68174c52:38255 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:30:49,342 DEBUG [RS:0;87fb68174c52:38255 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/87fb68174c52:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:30:49,342 DEBUG [RS:0;87fb68174c52:38255 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:30:49,342 DEBUG [RS:0;87fb68174c52:38255 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:30:49,342 DEBUG [RS:0;87fb68174c52:38255 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:30:49,342 DEBUG [RS:0;87fb68174c52:38255 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:30:49,342 DEBUG [RS:0;87fb68174c52:38255 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:30:49,342 DEBUG [RS:0;87fb68174c52:38255 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:30:49,342 DEBUG [RS:0;87fb68174c52:38255 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/87fb68174c52:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:30:49,342 DEBUG [RS:0;87fb68174c52:38255 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:30:49,343 INFO [RS:0;87fb68174c52:38255 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
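The ChoreService and executor entries above describe periodic tasks (for example CompactionChecker at period=1000 ms) and small bounded thread pools (corePoolSize=1..3 with matching maxPoolSize). A minimal JDK-only sketch of the same scheduling pattern, offered as an analogy rather than as HBase's actual ChoreService/ExecutorService code:

    import java.util.concurrent.Executors;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class ChoreAndPoolSketch {
      public static void main(String[] args) {
        // Periodic task at a fixed 1000 ms period, mirroring the logged
        // CompactionChecker / MemstoreFlusherChore (period=1000, unit=MILLISECONDS).
        ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();
        chores.scheduleAtFixedRate(
            () -> System.out.println("chore tick: check stores for compaction/flush work"),
            0, 1000, TimeUnit.MILLISECONDS);

        // Bounded worker pool in the spirit of the RS_OPEN_REGION style executors,
        // which the log reports with small, fixed core and max sizes.
        ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
            1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        openRegionPool.submit(() -> System.out.println("open-region task running"));
        // A real service would shut both pools down on stop; omitted in this sketch.
      }
    }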
2024-11-07T12:30:49,343 INFO [RS:0;87fb68174c52:38255 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:49,343 INFO [RS:0;87fb68174c52:38255 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:49,343 INFO [RS:0;87fb68174c52:38255 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:49,343 INFO [RS:0;87fb68174c52:38255 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:49,343 INFO [RS:0;87fb68174c52:38255 {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,38255,1730982649090-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:30:49,358 INFO [RS:0;87fb68174c52:38255 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-07T12:30:49,358 INFO [RS:0;87fb68174c52:38255 {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,38255,1730982649090-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:49,358 INFO [RS:0;87fb68174c52:38255 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:49,358 INFO [RS:0;87fb68174c52:38255 {}] regionserver.Replication(171): 87fb68174c52,38255,1730982649090 started 2024-11-07T12:30:49,372 INFO [RS:0;87fb68174c52:38255 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:49,372 INFO [RS:0;87fb68174c52:38255 {}] regionserver.HRegionServer(1482): Serving as 87fb68174c52,38255,1730982649090, RpcServer on 87fb68174c52/172.17.0.3:38255, sessionid=0x1001a39275d0001 2024-11-07T12:30:49,372 DEBUG [RS:0;87fb68174c52:38255 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-07T12:30:49,372 DEBUG [RS:0;87fb68174c52:38255 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 87fb68174c52,38255,1730982649090 2024-11-07T12:30:49,372 DEBUG [RS:0;87fb68174c52:38255 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87fb68174c52,38255,1730982649090' 2024-11-07T12:30:49,372 DEBUG [RS:0;87fb68174c52:38255 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-07T12:30:49,373 DEBUG [RS:0;87fb68174c52:38255 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-07T12:30:49,374 DEBUG [RS:0;87fb68174c52:38255 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-07T12:30:49,374 DEBUG [RS:0;87fb68174c52:38255 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-07T12:30:49,374 DEBUG [RS:0;87fb68174c52:38255 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 87fb68174c52,38255,1730982649090 2024-11-07T12:30:49,374 DEBUG [RS:0;87fb68174c52:38255 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87fb68174c52,38255,1730982649090' 2024-11-07T12:30:49,374 DEBUG [RS:0;87fb68174c52:38255 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-07T12:30:49,374 DEBUG 
[RS:0;87fb68174c52:38255 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-07T12:30:49,374 DEBUG [RS:0;87fb68174c52:38255 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-07T12:30:49,374 INFO [RS:0;87fb68174c52:38255 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-07T12:30:49,374 INFO [RS:0;87fb68174c52:38255 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-07T12:30:49,426 WARN [87fb68174c52:42317 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-07T12:30:49,476 INFO [RS:0;87fb68174c52:38255 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87fb68174c52%2C38255%2C1730982649090, suffix=, logDir=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/WALs/87fb68174c52,38255,1730982649090, archiveDir=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/oldWALs, maxLogs=32 2024-11-07T12:30:49,477 INFO [RS:0;87fb68174c52:38255 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C38255%2C1730982649090.1730982649477 2024-11-07T12:30:49,482 INFO [RS:0;87fb68174c52:38255 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/WALs/87fb68174c52,38255,1730982649090/87fb68174c52%2C38255%2C1730982649090.1730982649477 2024-11-07T12:30:49,483 DEBUG [RS:0;87fb68174c52:38255 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36937:36937),(127.0.0.1/127.0.0.1:45549:45549)] 2024-11-07T12:30:49,676 DEBUG [87fb68174c52:42317 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-07T12:30:49,677 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=87fb68174c52,38255,1730982649090 2024-11-07T12:30:49,678 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87fb68174c52,38255,1730982649090, state=OPENING 2024-11-07T12:30:49,680 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-07T12:30:49,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:49,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38255-0x1001a39275d0001, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:30:49,682 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:30:49,682 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=87fb68174c52,38255,1730982649090}] 2024-11-07T12:30:49,682 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for 
path /hbase/meta-region-server: CHANGED 2024-11-07T12:30:49,682 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:30:49,835 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-07T12:30:49,837 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59699, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-07T12:30:49,841 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-07T12:30:49,841 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:30:49,843 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87fb68174c52%2C38255%2C1730982649090.meta, suffix=.meta, logDir=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/WALs/87fb68174c52,38255,1730982649090, archiveDir=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/oldWALs, maxLogs=32 2024-11-07T12:30:49,844 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C38255%2C1730982649090.meta.1730982649843.meta 2024-11-07T12:30:49,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:49,849 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/WALs/87fb68174c52,38255,1730982649090/87fb68174c52%2C38255%2C1730982649090.meta.1730982649843.meta 2024-11-07T12:30:49,857 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45549:45549),(127.0.0.1/127.0.0.1:36937:36937)] 2024-11-07T12:30:49,859 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:30:49,860 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-07T12:30:49,860 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-07T12:30:49,860 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
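The recurring Close-WAL-Writer-0 warnings in this section (util.RecoverLeaseFSUtils(258): Failed invocation ... Caused by: java.io.IOException: Filesystem closed) do not come from the cluster just started: they are a leftover writer-close thread still retrying lease recovery on WAL files of the previous mini-cluster (hdfs://localhost:39665, test-data 198842d3-...), whose DFSClient has already been shut down, while the current cluster runs against hdfs://localhost:41511. The stack shows the probe being issued reflectively (Method.invoke) against DistributedFileSystem.isFileClosed. As a rough, hypothetical sketch of the recover-then-poll pattern involved (not HBase's actual RecoverLeaseFSUtils code; the WAL path is a placeholder parameter):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  /** Recover the HDFS write lease on an abandoned WAL, then poll until the file is closed. */
  static void recoverWalLease(DistributedFileSystem dfs, Path wal) throws Exception {
    boolean closed = dfs.recoverLease(wal);   // ask the NameNode to start lease recovery
    while (!closed) {
      Thread.sleep(1000);                     // back off between probes
      closed = dfs.isFileClosed(wal);         // the call that fails with "Filesystem closed"
                                              // once the owning DFSClient has been shut down
    }
  }
}

Because the probe only reads NameNode state, these repeated warnings appear to be benign noise from the earlier test rather than a problem with the cluster being started here.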
2024-11-07T12:30:49,860 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-07T12:30:49,860 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:30:49,860 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-07T12:30:49,860 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-07T12:30:49,861 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:30:49,862 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:30:49,862 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:49,863 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:30:49,863 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:30:49,863 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:30:49,863 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:49,864 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:30:49,864 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:30:49,864 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:30:49,864 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:49,865 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:30:49,865 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:30:49,865 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:30:49,865 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:49,866 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
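The compactions.CompactionConfiguration(183) lines above print, once per column family of hbase:meta, the effective compaction settings: minCompactSize 128 MB, files [3, 10), ratio 1.2, off-peak ratio 5.0, major period 604800000 ms with 0.5 jitter. Those numbers correspond to the usual hbase.hstore.compaction.* and hbase.hregion.majorcompaction* keys; a minimal sketch of setting them explicitly in a test Configuration (the values below simply restate the defaults the log shows, not a tuning recommendation):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class CompactionConfSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2F);                 // ratio
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);            // major period (7 days)
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5F);          // major jitter
    return conf;
  }
}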
2024-11-07T12:30:49,866 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:30:49,866 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/hbase/meta/1588230740 2024-11-07T12:30:49,867 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/hbase/meta/1588230740 2024-11-07T12:30:49,868 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:30:49,868 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:30:49,869 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T12:30:49,870 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:30:49,871 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=767113, jitterRate=-0.024566054344177246}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:30:49,871 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-07T12:30:49,872 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1730982649860Writing region info on filesystem at 1730982649860Initializing all the Stores at 1730982649861 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982649861Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982649861Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982649861Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982649861Cleaning up temporary data from old regions at 1730982649868 (+7 ms)Running coprocessor post-open hooks at 1730982649871 (+3 ms)Region opened successfully at 1730982649872 (+1 ms) 2024-11-07T12:30:49,873 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1730982649835 2024-11-07T12:30:49,875 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-07T12:30:49,875 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-07T12:30:49,876 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=87fb68174c52,38255,1730982649090 2024-11-07T12:30:49,877 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87fb68174c52,38255,1730982649090, state=OPEN 2024-11-07T12:30:49,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:30:49,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38255-0x1001a39275d0001, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:30:49,881 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=87fb68174c52,38255,1730982649090 2024-11-07T12:30:49,881 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:30:49,881 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:30:49,884 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-07T12:30:49,884 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=87fb68174c52,38255,1730982649090 in 199 msec 2024-11-07T12:30:49,887 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-07T12:30:49,887 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 611 msec 2024-11-07T12:30:49,887 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:30:49,887 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-07T12:30:49,889 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:30:49,889 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87fb68174c52,38255,1730982649090, seqNum=-1] 2024-11-07T12:30:49,889 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:30:49,891 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58543, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:30:49,896 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 659 msec 2024-11-07T12:30:49,896 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1730982649896, completionTime=-1 2024-11-07T12:30:49,896 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-07T12:30:49,896 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-07T12:30:49,898 INFO [master/87fb68174c52:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-07T12:30:49,898 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1730982709898 2024-11-07T12:30:49,898 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1730982769898 2024-11-07T12:30:49,898 INFO [master/87fb68174c52:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-07T12:30:49,898 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,42317,1730982649037-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:49,898 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,42317,1730982649037-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:49,898 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,42317,1730982649037-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:49,898 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-87fb68174c52:42317, period=300000, unit=MILLISECONDS is enabled. 
2024-11-07T12:30:49,898 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:49,898 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-07T12:30:49,900 DEBUG [master/87fb68174c52:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-07T12:30:49,901 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.774sec 2024-11-07T12:30:49,902 INFO [master/87fb68174c52:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-07T12:30:49,902 INFO [master/87fb68174c52:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-07T12:30:49,902 INFO [master/87fb68174c52:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-07T12:30:49,902 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-07T12:30:49,902 INFO [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-07T12:30:49,902 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,42317,1730982649037-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:30:49,902 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,42317,1730982649037-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-07T12:30:49,904 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-07T12:30:49,904 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-07T12:30:49,904 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,42317,1730982649037-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
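All of the "Chore ScheduledChore name=... is enabled." lines in this section (MemstoreFlusherChore, BalancerChore, CatalogJanitor, RollingUpgradeChore, and so on) are periodic tasks registered with the server's ChoreService. These are internal HBase classes, but the registration pattern is simple; a minimal sketch, assuming the ScheduledChore(String, Stoppable, int) and ChoreService(String) constructors visible in this branch:

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public final class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("demo");           // thread-pool prefix, like "master/..." above
    ScheduledChore chore = new ScheduledChore("DemoChore", stopper, 1000) {
      @Override protected void chore() {
        // periodic work; the server-side chores named above override chore() the same way
        System.out.println("chore tick");
      }
    };
    service.scheduleChore(chore);   // scheduling is what emits messages like the "is enabled." lines above
    Thread.sleep(3500);
    stopper.stop("demo done");
    service.shutdown();
  }
}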
2024-11-07T12:30:49,910 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c734161, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:30:49,910 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 87fb68174c52,42317,-1 for getting cluster id 2024-11-07T12:30:49,910 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-07T12:30:49,912 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '375433bc-167f-455d-a418-99e521bf2e26' 2024-11-07T12:30:49,912 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-07T12:30:49,912 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "375433bc-167f-455d-a418-99e521bf2e26" 2024-11-07T12:30:49,912 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a8290de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:30:49,912 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [87fb68174c52,42317,-1] 2024-11-07T12:30:49,913 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-07T12:30:49,913 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:30:49,914 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36106, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-07T12:30:49,915 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4355652c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:30:49,915 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:30:49,916 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87fb68174c52,38255,1730982649090, seqNum=-1] 2024-11-07T12:30:49,916 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:30:49,917 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52310, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:30:49,919 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=87fb68174c52,42317,1730982649037 2024-11-07T12:30:49,919 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:30:49,921 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-07T12:30:49,921 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-07T12:30:49,922 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 87fb68174c52,42317,1730982649037 2024-11-07T12:30:49,922 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4c5dc439 2024-11-07T12:30:49,923 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T12:30:49,923 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36120, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T12:30:49,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-07T12:30:49,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-07T12:30:49,924 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T12:30:49,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-07T12:30:49,927 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T12:30:49,927 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:49,927 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-07T12:30:49,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-07T12:30:49,928 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T12:30:49,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741835_1011 (size=405) 2024-11-07T12:30:49,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741835_1011 (size=405) 2024-11-07T12:30:49,937 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8360189b1eb5e990a8abba3b352d11ab, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb 2024-11-07T12:30:49,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741836_1012 (size=88) 2024-11-07T12:30:49,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741836_1012 (size=88) 2024-11-07T12:30:49,943 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:30:49,943 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 8360189b1eb5e990a8abba3b352d11ab, disabling compactions & flushes 2024-11-07T12:30:49,944 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab. 2024-11-07T12:30:49,944 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab. 2024-11-07T12:30:49,944 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab. after waiting 0 ms 2024-11-07T12:30:49,944 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab. 
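The HMaster$4(2454) entry above records the requested schema for TestLogRolling-testCompactionRecordDoesntBlockRolling: a single 'info' family with VERSIONS => '1'. The two TableDescriptorChecker warnings fire because the effective MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) are tiny; in this test they almost certainly come from the cluster-wide hbase.hregion.max.filesize and hbase.hregion.memstore.flush.size settings rather than the descriptor, and are deliberate so the test can force rolls and flushes quickly. As a minimal, hypothetical sketch of a client building an equivalent table with the HBase 3.x API (here the same limits are set per-table on the descriptor instead of in the configuration; connection setup is assumed, not taken from this log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
          .setMaxFileSize(786432)       // deliberately tiny -> would trip the MAX_FILESIZE warning above
          .setMemStoreFlushSize(8192)   // deliberately tiny -> would trip the MEMSTORE_FLUSHSIZE warning above
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)        // VERSIONS => '1' in the descriptor above
              .build())
          .build();
      admin.createTable(td);            // the master then runs the CreateTableProcedure logged here
    }
  }
}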
2024-11-07T12:30:49,944 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab. 2024-11-07T12:30:49,944 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8360189b1eb5e990a8abba3b352d11ab: Waiting for close lock at 1730982649943Disabling compacts and flushes for region at 1730982649943Disabling writes for close at 1730982649944 (+1 ms)Writing region close event to WAL at 1730982649944Closed at 1730982649944 2024-11-07T12:30:49,945 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T12:30:49,946 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1730982649945"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730982649945"}]},"ts":"1730982649945"} 2024-11-07T12:30:49,948 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-07T12:30:49,949 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T12:30:49,950 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730982649949"}]},"ts":"1730982649949"} 2024-11-07T12:30:49,952 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-07T12:30:49,952 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8360189b1eb5e990a8abba3b352d11ab, ASSIGN}] 2024-11-07T12:30:49,953 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8360189b1eb5e990a8abba3b352d11ab, ASSIGN 2024-11-07T12:30:49,954 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8360189b1eb5e990a8abba3b352d11ab, ASSIGN; state=OFFLINE, location=87fb68174c52,38255,1730982649090; forceNewPlan=false, retain=false 2024-11-07T12:30:50,030 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 
java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:50,105 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8360189b1eb5e990a8abba3b352d11ab, regionState=OPENING, regionLocation=87fb68174c52,38255,1730982649090 2024-11-07T12:30:50,107 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8360189b1eb5e990a8abba3b352d11ab, ASSIGN because future has completed 2024-11-07T12:30:50,108 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8360189b1eb5e990a8abba3b352d11ab, server=87fb68174c52,38255,1730982649090}] 2024-11-07T12:30:50,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:50,265 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab. 
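A small arithmetic note on the split-policy figures in this section: the HRegion(1114) "Opened ..." lines print desiredMaxFileSize together with a jitterRate, and the two relate to the configured hbase.hregion.max.filesize of 786432 bytes as desiredMaxFileSize ≈ 786432 × (1 + jitterRate). For the table region opened below, 786432 × (1 + 0.12140607833862305) ≈ 881909; for the hbase:meta region opened earlier, 786432 × (1 − 0.024566054344177246) ≈ 767113 (both to within a byte of rounding). The jitter is per-region randomization so that regions created together do not all hit their split size at the same moment.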
2024-11-07T12:30:50,265 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 8360189b1eb5e990a8abba3b352d11ab, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:30:50,265 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 8360189b1eb5e990a8abba3b352d11ab 2024-11-07T12:30:50,265 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:30:50,265 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 8360189b1eb5e990a8abba3b352d11ab 2024-11-07T12:30:50,265 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 8360189b1eb5e990a8abba3b352d11ab 2024-11-07T12:30:50,267 INFO [StoreOpener-8360189b1eb5e990a8abba3b352d11ab-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 8360189b1eb5e990a8abba3b352d11ab 2024-11-07T12:30:50,268 INFO [StoreOpener-8360189b1eb5e990a8abba3b352d11ab-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8360189b1eb5e990a8abba3b352d11ab columnFamilyName info 2024-11-07T12:30:50,268 DEBUG [StoreOpener-8360189b1eb5e990a8abba3b352d11ab-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:30:50,268 INFO [StoreOpener-8360189b1eb5e990a8abba3b352d11ab-1 {}] regionserver.HStore(327): Store=8360189b1eb5e990a8abba3b352d11ab/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:30:50,268 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 8360189b1eb5e990a8abba3b352d11ab 2024-11-07T12:30:50,269 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab 2024-11-07T12:30:50,269 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab 2024-11-07T12:30:50,270 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 8360189b1eb5e990a8abba3b352d11ab 2024-11-07T12:30:50,270 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 8360189b1eb5e990a8abba3b352d11ab 2024-11-07T12:30:50,271 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 8360189b1eb5e990a8abba3b352d11ab 2024-11-07T12:30:50,273 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:30:50,273 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 8360189b1eb5e990a8abba3b352d11ab; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=881909, jitterRate=0.12140607833862305}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-07T12:30:50,273 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8360189b1eb5e990a8abba3b352d11ab 2024-11-07T12:30:50,274 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 8360189b1eb5e990a8abba3b352d11ab: Running coprocessor pre-open hook at 1730982650265Writing region info on filesystem at 1730982650265Initializing all the Stores at 1730982650266 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982650266Cleaning up temporary data from old regions at 1730982650270 (+4 ms)Running coprocessor post-open hooks at 1730982650273 (+3 ms)Region opened successfully at 1730982650274 (+1 ms) 2024-11-07T12:30:50,275 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab., pid=6, masterSystemTime=1730982650261 2024-11-07T12:30:50,278 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab. 2024-11-07T12:30:50,278 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab. 2024-11-07T12:30:50,279 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8360189b1eb5e990a8abba3b352d11ab, regionState=OPEN, openSeqNum=2, regionLocation=87fb68174c52,38255,1730982649090 2024-11-07T12:30:50,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8360189b1eb5e990a8abba3b352d11ab, server=87fb68174c52,38255,1730982649090 because future has completed 2024-11-07T12:30:50,285 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-07T12:30:50,285 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 8360189b1eb5e990a8abba3b352d11ab, server=87fb68174c52,38255,1730982649090 in 174 msec 2024-11-07T12:30:50,287 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-07T12:30:50,287 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8360189b1eb5e990a8abba3b352d11ab, ASSIGN in 333 msec 2024-11-07T12:30:50,288 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T12:30:50,288 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730982650288"}]},"ts":"1730982650288"} 2024-11-07T12:30:50,290 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-07T12:30:50,291 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T12:30:50,293 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 367 msec 2024-11-07T12:30:50,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:51,031 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:51,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:51,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-07T12:30:52,020 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-07T12:30:52,021 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-07T12:30:55,339 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-07T12:30:55,339 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling'
2024-11-07T12:30:56,517 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-07T12:30:56,517 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-07T12:30:56,518 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-07T12:30:56,518 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-07T12:30:56,518 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-07T12:30:56,518 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-11-07T12:30:56,519 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-07T12:30:56,519 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-11-07T12:30:59,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:30:59,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:00,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-07T12:31:00,027 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-07T12:31:00,028 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-07T12:31:00,030 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-07T12:31:00,030 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab. 2024-11-07T12:31:00,033 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab., hostname=87fb68174c52,38255,1730982649090, seqNum=2] 2024-11-07T12:31:00,036 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:00,040 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-07T12:31:00,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-07T12:31:00,046 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-07T12:31:00,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-07T12:31:00,047 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T12:31:00,048 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T12:31:00,209 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38255 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-07T12:31:00,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab. 
2024-11-07T12:31:00,209 INFO [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 8360189b1eb5e990a8abba3b352d11ab 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-07T12:31:00,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/.tmp/info/8a5f1ba7fa91492e9d7f24eefcdd4f65 is 1080, key is row0001/info:/1730982660034/Put/seqid=0 2024-11-07T12:31:00,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741837_1013 (size=6033) 2024-11-07T12:31:00,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741837_1013 (size=6033) 2024-11-07T12:31:00,231 INFO [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/.tmp/info/8a5f1ba7fa91492e9d7f24eefcdd4f65 2024-11-07T12:31:00,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/.tmp/info/8a5f1ba7fa91492e9d7f24eefcdd4f65 as hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/info/8a5f1ba7fa91492e9d7f24eefcdd4f65 2024-11-07T12:31:00,242 INFO [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/info/8a5f1ba7fa91492e9d7f24eefcdd4f65, entries=1, sequenceid=5, filesize=5.9 K 2024-11-07T12:31:00,243 INFO [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8360189b1eb5e990a8abba3b352d11ab in 34ms, sequenceid=5, compaction requested=false 2024-11-07T12:31:00,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 8360189b1eb5e990a8abba3b352d11ab: 2024-11-07T12:31:00,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab. 
2024-11-07T12:31:00,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-07T12:31:00,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-07T12:31:00,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:00,251 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-07T12:31:00,251 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 200 msec 2024-11-07T12:31:00,253 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 209 msec 2024-11-07T12:31:00,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:01,036 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:01,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:01,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:02,037 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:02,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:02,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:03,037 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:03,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:03,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:04,038 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:04,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:04,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:05,039 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:05,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:05,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:06,039 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:06,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:06,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:07,040 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:07,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:07,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:08,041 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:08,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:08,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:09,041 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:09,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:09,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:10,042 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:10,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-07T12:31:10,118 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-07T12:31:10,121 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-07T12:31:10,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-07T12:31:10,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-07T12:31:10,123 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-07T12:31:10,125 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T12:31:10,125 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, 
hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T12:31:10,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:10,278 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38255 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-07T12:31:10,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab. 
2024-11-07T12:31:10,278 INFO [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 8360189b1eb5e990a8abba3b352d11ab 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-07T12:31:10,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/.tmp/info/d651f18fec6641e4ac21200df1ae58d2 is 1080, key is row0002/info:/1730982670119/Put/seqid=0 2024-11-07T12:31:10,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741838_1014 (size=6033) 2024-11-07T12:31:10,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741838_1014 (size=6033) 2024-11-07T12:31:10,289 INFO [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/.tmp/info/d651f18fec6641e4ac21200df1ae58d2 2024-11-07T12:31:10,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/.tmp/info/d651f18fec6641e4ac21200df1ae58d2 as hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/info/d651f18fec6641e4ac21200df1ae58d2 2024-11-07T12:31:10,300 INFO [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/info/d651f18fec6641e4ac21200df1ae58d2, entries=1, sequenceid=9, filesize=5.9 K 2024-11-07T12:31:10,301 INFO [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8360189b1eb5e990a8abba3b352d11ab in 23ms, sequenceid=9, compaction requested=false 2024-11-07T12:31:10,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 8360189b1eb5e990a8abba3b352d11ab: 2024-11-07T12:31:10,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab. 
2024-11-07T12:31:10,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-07T12:31:10,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-07T12:31:10,305 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-07T12:31:10,305 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 178 msec 2024-11-07T12:31:10,307 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec 2024-11-07T12:31:10,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:11,042 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:11,043 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 after 68044ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor192.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:31:11,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:11,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta after 68043ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor192.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:31:11,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:12,043 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:12,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:12,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:13,043 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:13,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:13,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:14,044 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:14,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:14,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:15,044 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:15,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:15,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:16,045 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:16,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:16,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:17,046 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:17,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:17,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:18,046 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:18,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:18,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:19,021 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-07T12:31:19,047 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:19,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:19,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:20,047 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-07T12:31:20,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-07T12:31:20,208 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-07T12:31:20,211 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C38255%2C1730982649090.1730982680210
2024-11-07T12:31:20,216 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:31:20,216 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:31:20,216 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:31:20,216 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:31:20,216 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:31:20,216 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/WALs/87fb68174c52,38255,1730982649090/87fb68174c52%2C38255%2C1730982649090.1730982649477 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/WALs/87fb68174c52,38255,1730982649090/87fb68174c52%2C38255%2C1730982649090.1730982680210
2024-11-07T12:31:20,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741833_1009 (size=5546)
2024-11-07T12:31:20,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741833_1009 (size=5546)
2024-11-07T12:31:20,221 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45549:45549),(127.0.0.1/127.0.0.1:36937:36937)]
2024-11-07T12:31:20,222 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-07T12:31:20,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-07T12:31:20,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-07T12:31:20,224 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-07T12:31:20,225 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-07T12:31:20,225 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-07T12:31:20,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-07T12:31:20,378 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38255 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-07T12:31:20,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab.
2024-11-07T12:31:20,379 INFO [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 8360189b1eb5e990a8abba3b352d11ab 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-07T12:31:20,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/.tmp/info/0d4073bf11e643c4adb575fa3481c333 is 1080, key is row0003/info:/1730982680209/Put/seqid=0
2024-11-07T12:31:20,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741840_1016 (size=6033)
2024-11-07T12:31:20,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741840_1016 (size=6033)
2024-11-07T12:31:20,389 INFO [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/.tmp/info/0d4073bf11e643c4adb575fa3481c333
2024-11-07T12:31:20,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/.tmp/info/0d4073bf11e643c4adb575fa3481c333 as hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/info/0d4073bf11e643c4adb575fa3481c333
2024-11-07T12:31:20,400 INFO [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/info/0d4073bf11e643c4adb575fa3481c333, entries=1, sequenceid=13, filesize=5.9 K
2024-11-07T12:31:20,401 INFO [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8360189b1eb5e990a8abba3b352d11ab in 22ms, sequenceid=13, compaction requested=true
2024-11-07T12:31:20,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 8360189b1eb5e990a8abba3b352d11ab:
2024-11-07T12:31:20,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab.
2024-11-07T12:31:20,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-07T12:31:20,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-07T12:31:20,405 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-07T12:31:20,405 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 178 msec
2024-11-07T12:31:20,408 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 184 msec
2024-11-07T12:31:20,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-07T12:31:21,048 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:21,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:21,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:22,048 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:22,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:22,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:23,049 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:23,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:23,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:24,049 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:24,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:24,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:25,050 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:25,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:25,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:26,050 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:26,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:26,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-07T12:31:27,051 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-07T12:31:27,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-07T12:31:27,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-07T12:31:28,051 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-07T12:31:28,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-07T12:31:28,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-07T12:31:29,052 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-07T12:31:29,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-07T12:31:29,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-07T12:31:29,905 INFO [master/87fb68174c52:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
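Every one of the Close-WAL-Writer-0 warnings above has the same shape: RecoverLeaseFSUtils probes DFSClient.isFileClosed through reflection, and because the DistributedFileSystem behind those WAL files has already been closed the probe throws IOException("Filesystem closed"), which reflection re-wraps as an InvocationTargetException with a null message. A minimal, self-contained Java sketch of that wrapping behaviour follows; ClosedFs and ReflectiveLeaseCheck are invented stand-ins for illustration, not HBase code.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Illustration only: a stand-in for a DFS client whose filesystem has already been closed.
class ClosedFs {
    public boolean isFileClosed(String path) throws IOException {
        // Mirrors DFSClient.checkOpen() rejecting calls after the filesystem was shut down.
        throw new IOException("Filesystem closed");
    }
}

public class ReflectiveLeaseCheck {
    public static void main(String[] args) throws Exception {
        Method isFileClosed = ClosedFs.class.getMethod("isFileClosed", String.class);
        try {
            isFileClosed.invoke(new ClosedFs(), "/some/wal/file");
        } catch (InvocationTargetException e) {
            // Method.invoke wraps whatever the target threw; the wrapper itself carries no
            // message ("InvocationTargetException: null"), the real reason is the cause.
            System.out.println("outer: " + e.getMessage());            // null
            System.out.println("cause: " + e.getCause().getMessage()); // Filesystem closed
        }
    }
}

Running the sketch prints "outer: null" and "cause: Filesystem closed", the same pairing the warnings above report.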
2024-11-07T12:31:29,905 INFO [master/87fb68174c52:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-07T12:31:30,052 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:30,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-07T12:31:30,237 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-07T12:31:30,237 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T12:31:30,239 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T12:31:30,239 DEBUG [Time-limited test {}] regionserver.HStore(1541): 8360189b1eb5e990a8abba3b352d11ab/info is initiating minor compaction (all files) 2024-11-07T12:31:30,239 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-07T12:31:30,239 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:31:30,239 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 8360189b1eb5e990a8abba3b352d11ab/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab. 2024-11-07T12:31:30,239 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/info/8a5f1ba7fa91492e9d7f24eefcdd4f65, hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/info/d651f18fec6641e4ac21200df1ae58d2, hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/info/0d4073bf11e643c4adb575fa3481c333] into tmpdir=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/.tmp, totalSize=17.7 K 2024-11-07T12:31:30,240 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 8a5f1ba7fa91492e9d7f24eefcdd4f65, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1730982660034 2024-11-07T12:31:30,240 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting d651f18fec6641e4ac21200df1ae58d2, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1730982670119 2024-11-07T12:31:30,241 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 0d4073bf11e643c4adb575fa3481c333, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1730982680209 2024-11-07T12:31:30,251 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 8360189b1eb5e990a8abba3b352d11ab#info#compaction#45 average throughput is unlimited, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:31:30,252 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/.tmp/info/133dcd4a7bca47e8a9c2899a23e941c2 is 1080, key is row0001/info:/1730982660034/Put/seqid=0 2024-11-07T12:31:30,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741841_1017 (size=8296) 2024-11-07T12:31:30,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741841_1017 (size=8296) 2024-11-07T12:31:30,262 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/.tmp/info/133dcd4a7bca47e8a9c2899a23e941c2 as hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/info/133dcd4a7bca47e8a9c2899a23e941c2 2024-11-07T12:31:30,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:30,267 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8360189b1eb5e990a8abba3b352d11ab/info of 8360189b1eb5e990a8abba3b352d11ab into 133dcd4a7bca47e8a9c2899a23e941c2(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T12:31:30,267 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 8360189b1eb5e990a8abba3b352d11ab: 2024-11-07T12:31:30,270 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C38255%2C1730982649090.1730982690270 2024-11-07T12:31:30,275 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:30,275 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:30,275 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:30,276 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:30,276 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:30,276 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/WALs/87fb68174c52,38255,1730982649090/87fb68174c52%2C38255%2C1730982649090.1730982680210 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/WALs/87fb68174c52,38255,1730982649090/87fb68174c52%2C38255%2C1730982649090.1730982690270 2024-11-07T12:31:30,277 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45549:45549),(127.0.0.1/127.0.0.1:36937:36937)] 2024-11-07T12:31:30,277 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/WALs/87fb68174c52,38255,1730982649090/87fb68174c52%2C38255%2C1730982649090.1730982680210 is not closed yet, will try archiving it next time 2024-11-07T12:31:30,277 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/WALs/87fb68174c52,38255,1730982649090/87fb68174c52%2C38255%2C1730982649090.1730982649477 to hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/oldWALs/87fb68174c52%2C38255%2C1730982649090.1730982649477 2024-11-07T12:31:30,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741839_1015 (size=2520) 2024-11-07T12:31:30,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741839_1015 (size=2520) 2024-11-07T12:31:30,278 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-07T12:31:30,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-07T12:31:30,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 
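For reference on the minor compaction recorded just above: the selection size of 18099 bytes is three store files of 6033 bytes each (the ~5.9 K files listed), and "1 in ratio" refers to the size-ratio check, under which every file in a candidate set must be no larger than ratio times the combined size of the other candidates. A small Java sketch of that check, assuming the default compaction ratio of 1.2; this is a simplified illustration, not the actual ExploringCompactionPolicy implementation.

import java.util.Arrays;

public class RatioCheckSketch {
    // Size-ratio heuristic behind "in ratio": every file in the candidate set must be
    // no larger than ratio * (combined size of the other files). Simplified; the real
    // policy also applies min/max file counts, off-peak ratios and other constraints.
    static boolean inRatio(long[] sizes, double ratio) {
        long total = Arrays.stream(sizes).sum();
        for (long size : sizes) {
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // The three store files in this excerpt are 6033 bytes each (3 x 6033 = 18099).
        long[] candidate = {6033L, 6033L, 6033L};
        System.out.println(inRatio(candidate, 1.2)); // true -> the permutation is "in ratio"
    }
}

With three equal 6033-byte files, no single file exceeds 1.2 times the other two combined, so the lone permutation considered passes, matching the "considering 1 permutations with 1 in ratio" line above.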
2024-11-07T12:31:30,280 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-07T12:31:30,281 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T12:31:30,281 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T12:31:30,434 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38255 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-07T12:31:30,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab. 2024-11-07T12:31:30,434 INFO [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 8360189b1eb5e990a8abba3b352d11ab 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-07T12:31:30,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/.tmp/info/04635292d2444589844a6f578f8413a4 is 1080, key is row0000/info:/1730982690269/Put/seqid=0 2024-11-07T12:31:30,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741843_1019 (size=6033) 2024-11-07T12:31:30,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741843_1019 (size=6033) 2024-11-07T12:31:30,444 INFO [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/.tmp/info/04635292d2444589844a6f578f8413a4 2024-11-07T12:31:30,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/.tmp/info/04635292d2444589844a6f578f8413a4 as hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/info/04635292d2444589844a6f578f8413a4 2024-11-07T12:31:30,454 INFO 
[RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/info/04635292d2444589844a6f578f8413a4, entries=1, sequenceid=18, filesize=5.9 K 2024-11-07T12:31:30,455 INFO [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8360189b1eb5e990a8abba3b352d11ab in 21ms, sequenceid=18, compaction requested=false 2024-11-07T12:31:30,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 8360189b1eb5e990a8abba3b352d11ab: 2024-11-07T12:31:30,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab. 2024-11-07T12:31:30,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-07T12:31:30,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-07T12:31:30,460 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-07T12:31:30,460 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 176 msec 2024-11-07T12:31:30,462 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 183 msec 2024-11-07T12:31:30,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:31,053 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-07T12:31:31,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-07T12:31:31,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-07T12:31:32,053 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-07T12:31:32,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-07T12:31:32,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-07T12:31:33,054 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-07T12:31:33,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-07T12:31:33,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-07T12:31:34,054 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-07T12:31:34,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-07T12:31:34,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:35,055 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:35,265 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 8360189b1eb5e990a8abba3b352d11ab, had cached 0 bytes from a total of 14329 2024-11-07T12:31:35,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:35,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:36,055 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:36,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:36,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:37,056 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:37,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:37,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:38,056 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:38,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:38,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:39,057 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:39,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:39,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:40,057 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:40,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:40,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42317 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-07T12:31:40,308 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-07T12:31:40,310 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C38255%2C1730982649090.1730982700310 2024-11-07T12:31:40,316 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:40,316 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:40,316 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:40,316 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:40,317 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:40,317 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/WALs/87fb68174c52,38255,1730982649090/87fb68174c52%2C38255%2C1730982649090.1730982690270 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/WALs/87fb68174c52,38255,1730982649090/87fb68174c52%2C38255%2C1730982649090.1730982700310 2024-11-07T12:31:40,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741842_1018 (size=2026) 2024-11-07T12:31:40,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741842_1018 (size=2026) 2024-11-07T12:31:40,319 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/WALs/87fb68174c52,38255,1730982649090/87fb68174c52%2C38255%2C1730982649090.1730982680210 to hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/oldWALs/87fb68174c52%2C38255%2C1730982649090.1730982680210 2024-11-07T12:31:40,321 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36937:36937),(127.0.0.1/127.0.0.1:45549:45549)] 2024-11-07T12:31:40,321 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-07T12:31:40,321 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-07T12:31:40,321 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:31:40,321 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:31:40,322 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:31:40,322 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-07T12:31:40,322 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-07T12:31:40,322 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=345624216, stopped=false 2024-11-07T12:31:40,322 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=87fb68174c52,42317,1730982649037 2024-11-07T12:31:40,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:31:40,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:31:40,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38255-0x1001a39275d0001, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:31:40,324 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:31:40,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38255-0x1001a39275d0001, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:31:40,324 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-07T12:31:40,324 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:31:40,324 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:31:40,324 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '87fb68174c52,38255,1730982649090' ***** 2024-11-07T12:31:40,324 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:31:40,324 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-07T12:31:40,324 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38255-0x1001a39275d0001, quorum=127.0.0.1:64652, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:31:40,325 INFO [RS:0;87fb68174c52:38255 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-07T12:31:40,325 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-07T12:31:40,325 INFO [RS:0;87fb68174c52:38255 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-07T12:31:40,325 INFO [RS:0;87fb68174c52:38255 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-07T12:31:40,325 INFO [RS:0;87fb68174c52:38255 {}] regionserver.HRegionServer(3091): Received CLOSE for 8360189b1eb5e990a8abba3b352d11ab 2024-11-07T12:31:40,325 INFO [RS:0;87fb68174c52:38255 {}] regionserver.HRegionServer(959): stopping server 87fb68174c52,38255,1730982649090 2024-11-07T12:31:40,325 INFO [RS:0;87fb68174c52:38255 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:31:40,325 INFO [RS:0;87fb68174c52:38255 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;87fb68174c52:38255. 
2024-11-07T12:31:40,325 DEBUG [RS:0;87fb68174c52:38255 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:31:40,325 DEBUG [RS:0;87fb68174c52:38255 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:31:40,325 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 8360189b1eb5e990a8abba3b352d11ab, disabling compactions & flushes 2024-11-07T12:31:40,325 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab. 2024-11-07T12:31:40,325 INFO [RS:0;87fb68174c52:38255 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-07T12:31:40,325 INFO [RS:0;87fb68174c52:38255 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-07T12:31:40,325 INFO [RS:0;87fb68174c52:38255 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-07T12:31:40,325 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab. 2024-11-07T12:31:40,325 INFO [RS:0;87fb68174c52:38255 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-07T12:31:40,325 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab. after waiting 0 ms 2024-11-07T12:31:40,325 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab. 
2024-11-07T12:31:40,326 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 8360189b1eb5e990a8abba3b352d11ab 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-07T12:31:40,326 INFO [RS:0;87fb68174c52:38255 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-07T12:31:40,326 DEBUG [RS:0;87fb68174c52:38255 {}] regionserver.HRegionServer(1325): Online Regions={8360189b1eb5e990a8abba3b352d11ab=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab., 1588230740=hbase:meta,,1.1588230740} 2024-11-07T12:31:40,326 DEBUG [RS:0;87fb68174c52:38255 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 8360189b1eb5e990a8abba3b352d11ab 2024-11-07T12:31:40,326 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:31:40,326 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:31:40,326 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:31:40,326 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:31:40,326 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:31:40,326 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-07T12:31:40,330 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/.tmp/info/ef4a6035b5144d14b3e8f652ee1eee3d is 1080, key is row0001/info:/1730982700309/Put/seqid=0 2024-11-07T12:31:40,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741845_1021 (size=6033) 2024-11-07T12:31:40,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741845_1021 (size=6033) 2024-11-07T12:31:40,335 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/.tmp/info/ef4a6035b5144d14b3e8f652ee1eee3d 2024-11-07T12:31:40,344 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/.tmp/info/ef4a6035b5144d14b3e8f652ee1eee3d as hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/info/ef4a6035b5144d14b3e8f652ee1eee3d 2024-11-07T12:31:40,346 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/hbase/meta/1588230740/.tmp/info/d46d0918e65c49c9b9ee584691b92434 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab./info:regioninfo/1730982650278/Put/seqid=0 2024-11-07T12:31:40,348 INFO [regionserver/87fb68174c52:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-07T12:31:40,348 INFO [regionserver/87fb68174c52:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-07T12:31:40,349 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/info/ef4a6035b5144d14b3e8f652ee1eee3d, entries=1, sequenceid=22, filesize=5.9 K 2024-11-07T12:31:40,350 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8360189b1eb5e990a8abba3b352d11ab in 25ms, sequenceid=22, compaction requested=true 2024-11-07T12:31:40,350 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/info/8a5f1ba7fa91492e9d7f24eefcdd4f65, hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/info/d651f18fec6641e4ac21200df1ae58d2, hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/info/0d4073bf11e643c4adb575fa3481c333] to archive 2024-11-07T12:31:40,351 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-07T12:31:40,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741846_1022 (size=7308) 2024-11-07T12:31:40,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741846_1022 (size=7308) 2024-11-07T12:31:40,352 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/hbase/meta/1588230740/.tmp/info/d46d0918e65c49c9b9ee584691b92434 2024-11-07T12:31:40,353 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/info/8a5f1ba7fa91492e9d7f24eefcdd4f65 to hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/info/8a5f1ba7fa91492e9d7f24eefcdd4f65 2024-11-07T12:31:40,354 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/info/d651f18fec6641e4ac21200df1ae58d2 to hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/info/d651f18fec6641e4ac21200df1ae58d2 2024-11-07T12:31:40,356 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/info/0d4073bf11e643c4adb575fa3481c333 to hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/info/0d4073bf11e643c4adb575fa3481c333 2024-11-07T12:31:40,356 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=87fb68174c52:42317 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] 
at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-07T12:31:40,356 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [8a5f1ba7fa91492e9d7f24eefcdd4f65=6033, d651f18fec6641e4ac21200df1ae58d2=6033, 0d4073bf11e643c4adb575fa3481c333=6033] 2024-11-07T12:31:40,360 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8360189b1eb5e990a8abba3b352d11ab/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-07T12:31:40,361 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab. 2024-11-07T12:31:40,361 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 8360189b1eb5e990a8abba3b352d11ab: Waiting for close lock at 1730982700325Running coprocessor pre-close hooks at 1730982700325Disabling compacts and flushes for region at 1730982700325Disabling writes for close at 1730982700325Obtaining lock to block concurrent updates at 1730982700326 (+1 ms)Preparing flush snapshotting stores in 8360189b1eb5e990a8abba3b352d11ab at 1730982700326Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1730982700326Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab. at 1730982700326Flushing 8360189b1eb5e990a8abba3b352d11ab/info: creating writer at 1730982700327 (+1 ms)Flushing 8360189b1eb5e990a8abba3b352d11ab/info: appending metadata at 1730982700329 (+2 ms)Flushing 8360189b1eb5e990a8abba3b352d11ab/info: closing flushed file at 1730982700329Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5795fc27: reopening flushed file at 1730982700343 (+14 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8360189b1eb5e990a8abba3b352d11ab in 25ms, sequenceid=22, compaction requested=true at 1730982700350 (+7 ms)Writing region close event to WAL at 1730982700357 (+7 ms)Running coprocessor post-close hooks at 1730982700360 (+3 ms)Closed at 1730982700360 2024-11-07T12:31:40,361 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730982649924.8360189b1eb5e990a8abba3b352d11ab. 
2024-11-07T12:31:40,371 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/hbase/meta/1588230740/.tmp/ns/62ef4bb53e72476d96802bfa2d17206b is 43, key is default/ns:d/1730982649891/Put/seqid=0 2024-11-07T12:31:40,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741847_1023 (size=5153) 2024-11-07T12:31:40,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741847_1023 (size=5153) 2024-11-07T12:31:40,376 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/hbase/meta/1588230740/.tmp/ns/62ef4bb53e72476d96802bfa2d17206b 2024-11-07T12:31:40,394 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/hbase/meta/1588230740/.tmp/table/3c71839a51b2456e95031b4ee7b2f0fb is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1730982650288/Put/seqid=0 2024-11-07T12:31:40,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741848_1024 (size=5508) 2024-11-07T12:31:40,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741848_1024 (size=5508) 2024-11-07T12:31:40,399 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/hbase/meta/1588230740/.tmp/table/3c71839a51b2456e95031b4ee7b2f0fb 2024-11-07T12:31:40,404 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/hbase/meta/1588230740/.tmp/info/d46d0918e65c49c9b9ee584691b92434 as hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/hbase/meta/1588230740/info/d46d0918e65c49c9b9ee584691b92434 2024-11-07T12:31:40,409 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/hbase/meta/1588230740/info/d46d0918e65c49c9b9ee584691b92434, entries=10, sequenceid=11, filesize=7.1 K 2024-11-07T12:31:40,409 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/hbase/meta/1588230740/.tmp/ns/62ef4bb53e72476d96802bfa2d17206b as hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/hbase/meta/1588230740/ns/62ef4bb53e72476d96802bfa2d17206b 2024-11-07T12:31:40,414 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/hbase/meta/1588230740/ns/62ef4bb53e72476d96802bfa2d17206b, entries=2, sequenceid=11, filesize=5.0 K 2024-11-07T12:31:40,415 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/hbase/meta/1588230740/.tmp/table/3c71839a51b2456e95031b4ee7b2f0fb as hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/hbase/meta/1588230740/table/3c71839a51b2456e95031b4ee7b2f0fb 2024-11-07T12:31:40,419 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/hbase/meta/1588230740/table/3c71839a51b2456e95031b4ee7b2f0fb, entries=2, sequenceid=11, filesize=5.4 K 2024-11-07T12:31:40,421 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 94ms, sequenceid=11, compaction requested=false 2024-11-07T12:31:40,425 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-07T12:31:40,425 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T12:31:40,425 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:31:40,425 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730982700326Running coprocessor pre-close hooks at 1730982700326Disabling compacts and flushes for region at 1730982700326Disabling writes for close at 1730982700326Obtaining lock to block concurrent updates at 1730982700326Preparing flush snapshotting stores in 1588230740 at 1730982700326Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1730982700327 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1730982700327Flushing 1588230740/info: creating writer at 1730982700327Flushing 1588230740/info: appending metadata at 1730982700345 (+18 ms)Flushing 1588230740/info: closing flushed file at 1730982700346 (+1 ms)Flushing 1588230740/ns: creating writer at 1730982700357 (+11 ms)Flushing 1588230740/ns: appending metadata at 1730982700370 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1730982700371 (+1 ms)Flushing 1588230740/table: creating writer at 1730982700381 (+10 ms)Flushing 1588230740/table: appending metadata at 1730982700394 (+13 ms)Flushing 1588230740/table: closing flushed file at 1730982700394Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a99cf77: reopening flushed file at 1730982700404 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@714211f8: reopening flushed file at 1730982700409 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@30bd47a3: reopening flushed file at 1730982700414 (+5 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 94ms, sequenceid=11, compaction requested=false at 1730982700421 (+7 ms)Writing region close event to WAL at 1730982700422 (+1 ms)Running coprocessor post-close hooks at 1730982700425 (+3 ms)Closed at 1730982700425 2024-11-07T12:31:40,425 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-07T12:31:40,526 INFO [RS:0;87fb68174c52:38255 {}] regionserver.HRegionServer(976): stopping server 87fb68174c52,38255,1730982649090; all regions closed. 2024-11-07T12:31:40,527 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:40,527 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:40,527 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:40,527 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:40,527 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:40,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741834_1010 (size=3306) 2024-11-07T12:31:40,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741834_1010 (size=3306) 2024-11-07T12:31:40,531 DEBUG [RS:0;87fb68174c52:38255 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/oldWALs 2024-11-07T12:31:40,531 INFO [RS:0;87fb68174c52:38255 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87fb68174c52%2C38255%2C1730982649090.meta:.meta(num 1730982649843) 2024-11-07T12:31:40,532 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:40,532 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:40,532 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:40,532 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:40,532 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:40,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741844_1020 (size=1252) 2024-11-07T12:31:40,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741844_1020 (size=1252) 2024-11-07T12:31:40,536 DEBUG [RS:0;87fb68174c52:38255 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/oldWALs 2024-11-07T12:31:40,536 INFO [RS:0;87fb68174c52:38255 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87fb68174c52%2C38255%2C1730982649090:(num 1730982700310) 2024-11-07T12:31:40,536 DEBUG [RS:0;87fb68174c52:38255 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:31:40,536 INFO [RS:0;87fb68174c52:38255 {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:31:40,536 INFO [RS:0;87fb68174c52:38255 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:31:40,536 INFO [RS:0;87fb68174c52:38255 {}] hbase.ChoreService(370): Chore service for: regionserver/87fb68174c52:0 had 
[ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-07T12:31:40,536 INFO [RS:0;87fb68174c52:38255 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:31:40,536 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-07T12:31:40,537 INFO [RS:0;87fb68174c52:38255 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38255 2024-11-07T12:31:40,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38255-0x1001a39275d0001, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/87fb68174c52,38255,1730982649090 2024-11-07T12:31:40,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:31:40,539 INFO [RS:0;87fb68174c52:38255 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:31:40,540 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [87fb68174c52,38255,1730982649090] 2024-11-07T12:31:40,542 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/87fb68174c52,38255,1730982649090 already deleted, retry=false 2024-11-07T12:31:40,542 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 87fb68174c52,38255,1730982649090 expired; onlineServers=0 2024-11-07T12:31:40,542 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '87fb68174c52,42317,1730982649037' ***** 2024-11-07T12:31:40,542 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-07T12:31:40,542 INFO [M:0;87fb68174c52:42317 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:31:40,542 INFO [M:0;87fb68174c52:42317 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:31:40,542 DEBUG [M:0;87fb68174c52:42317 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-07T12:31:40,542 DEBUG [M:0;87fb68174c52:42317 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-07T12:31:40,542 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-07T12:31:40,542 DEBUG [master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.large.0-1730982649241 {}] cleaner.HFileCleaner(306): Exit Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.large.0-1730982649241,5,FailOnTimeoutGroup] 2024-11-07T12:31:40,542 DEBUG [master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.small.0-1730982649241 {}] cleaner.HFileCleaner(306): Exit Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.small.0-1730982649241,5,FailOnTimeoutGroup] 2024-11-07T12:31:40,542 INFO [M:0;87fb68174c52:42317 {}] hbase.ChoreService(370): Chore service for: master/87fb68174c52:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-07T12:31:40,542 INFO [M:0;87fb68174c52:42317 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:31:40,542 DEBUG [M:0;87fb68174c52:42317 {}] master.HMaster(1795): Stopping service threads 2024-11-07T12:31:40,542 INFO [M:0;87fb68174c52:42317 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-07T12:31:40,543 INFO [M:0;87fb68174c52:42317 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:31:40,543 INFO [M:0;87fb68174c52:42317 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-07T12:31:40,543 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-07T12:31:40,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-07T12:31:40,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:31:40,544 DEBUG [M:0;87fb68174c52:42317 {}] zookeeper.ZKUtil(347): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-07T12:31:40,544 WARN [M:0;87fb68174c52:42317 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-07T12:31:40,544 INFO [M:0;87fb68174c52:42317 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/.lastflushedseqids 2024-11-07T12:31:40,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741849_1025 (size=130) 2024-11-07T12:31:40,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741849_1025 (size=130) 2024-11-07T12:31:40,549 INFO [M:0;87fb68174c52:42317 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-07T12:31:40,550 INFO [M:0;87fb68174c52:42317 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-07T12:31:40,550 DEBUG [M:0;87fb68174c52:42317 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:31:40,550 INFO [M:0;87fb68174c52:42317 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:31:40,550 DEBUG [M:0;87fb68174c52:42317 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:31:40,550 DEBUG [M:0;87fb68174c52:42317 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:31:40,550 DEBUG [M:0;87fb68174c52:42317 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:31:40,550 INFO [M:0;87fb68174c52:42317 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.62 KB heapSize=55.02 KB 2024-11-07T12:31:40,565 DEBUG [M:0;87fb68174c52:42317 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/488a30a731264f2796098b456af6fc40 is 82, key is hbase:meta,,1/info:regioninfo/1730982649876/Put/seqid=0 2024-11-07T12:31:40,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741850_1026 (size=5672) 2024-11-07T12:31:40,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741850_1026 (size=5672) 2024-11-07T12:31:40,570 INFO [M:0;87fb68174c52:42317 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/488a30a731264f2796098b456af6fc40 2024-11-07T12:31:40,588 DEBUG [M:0;87fb68174c52:42317 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6f4940a8b037467d85106573f02c0e5f is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1730982650292/Put/seqid=0 2024-11-07T12:31:40,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741851_1027 (size=7826) 2024-11-07T12:31:40,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741851_1027 (size=7826) 2024-11-07T12:31:40,592 INFO [M:0;87fb68174c52:42317 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.02 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6f4940a8b037467d85106573f02c0e5f 2024-11-07T12:31:40,597 INFO [M:0;87fb68174c52:42317 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 6f4940a8b037467d85106573f02c0e5f 2024-11-07T12:31:40,610 DEBUG [M:0;87fb68174c52:42317 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a06e0b1b011b4059a1a336b7625db490 is 69, key is 87fb68174c52,38255,1730982649090/rs:state/1730982649330/Put/seqid=0 
2024-11-07T12:31:40,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741852_1028 (size=5156) 2024-11-07T12:31:40,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741852_1028 (size=5156) 2024-11-07T12:31:40,615 INFO [M:0;87fb68174c52:42317 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a06e0b1b011b4059a1a336b7625db490 2024-11-07T12:31:40,632 DEBUG [M:0;87fb68174c52:42317 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/50b63d997f4145d1a3700473b21e9c94 is 52, key is load_balancer_on/state:d/1730982649920/Put/seqid=0 2024-11-07T12:31:40,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741853_1029 (size=5056) 2024-11-07T12:31:40,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741853_1029 (size=5056) 2024-11-07T12:31:40,637 INFO [M:0;87fb68174c52:42317 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/50b63d997f4145d1a3700473b21e9c94 2024-11-07T12:31:40,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38255-0x1001a39275d0001, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:31:40,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38255-0x1001a39275d0001, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:31:40,641 INFO [RS:0;87fb68174c52:38255 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:31:40,641 INFO [RS:0;87fb68174c52:38255 {}] regionserver.HRegionServer(1031): Exiting; stopping=87fb68174c52,38255,1730982649090; zookeeper connection closed. 
2024-11-07T12:31:40,641 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@743dd2ff {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@743dd2ff 2024-11-07T12:31:40,641 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-07T12:31:40,642 DEBUG [M:0;87fb68174c52:42317 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/488a30a731264f2796098b456af6fc40 as hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/488a30a731264f2796098b456af6fc40 2024-11-07T12:31:40,646 INFO [M:0;87fb68174c52:42317 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/488a30a731264f2796098b456af6fc40, entries=8, sequenceid=121, filesize=5.5 K 2024-11-07T12:31:40,647 DEBUG [M:0;87fb68174c52:42317 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6f4940a8b037467d85106573f02c0e5f as hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6f4940a8b037467d85106573f02c0e5f 2024-11-07T12:31:40,651 INFO [M:0;87fb68174c52:42317 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 6f4940a8b037467d85106573f02c0e5f 2024-11-07T12:31:40,651 INFO [M:0;87fb68174c52:42317 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6f4940a8b037467d85106573f02c0e5f, entries=14, sequenceid=121, filesize=7.6 K 2024-11-07T12:31:40,652 DEBUG [M:0;87fb68174c52:42317 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a06e0b1b011b4059a1a336b7625db490 as hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a06e0b1b011b4059a1a336b7625db490 2024-11-07T12:31:40,656 INFO [M:0;87fb68174c52:42317 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a06e0b1b011b4059a1a336b7625db490, entries=1, sequenceid=121, filesize=5.0 K 2024-11-07T12:31:40,657 DEBUG [M:0;87fb68174c52:42317 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/50b63d997f4145d1a3700473b21e9c94 as hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/50b63d997f4145d1a3700473b21e9c94 2024-11-07T12:31:40,661 INFO [M:0;87fb68174c52:42317 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41511/user/jenkins/test-data/1120a117-841b-661f-da3a-bbe01d5f21bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/50b63d997f4145d1a3700473b21e9c94, entries=1, sequenceid=121, filesize=4.9 K 2024-11-07T12:31:40,662 INFO [M:0;87fb68174c52:42317 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.62 KB/44668, heapSize ~54.96 KB/56280, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 112ms, sequenceid=121, compaction requested=false 2024-11-07T12:31:40,664 INFO [M:0;87fb68174c52:42317 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:31:40,664 DEBUG [M:0;87fb68174c52:42317 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730982700550Disabling compacts and flushes for region at 1730982700550Disabling writes for close at 1730982700550Obtaining lock to block concurrent updates at 1730982700550Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1730982700550Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44668, getHeapSize=56280, getOffHeapSize=0, getCellsCount=140 at 1730982700550Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1730982700551 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1730982700551Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1730982700565 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1730982700565Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1730982700574 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1730982700587 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1730982700587Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1730982700597 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1730982700610 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1730982700610Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1730982700619 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1730982700632 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1730982700632Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@184c71fa: reopening flushed file at 1730982700641 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a575080: reopening flushed file at 1730982700646 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1c0be76b: reopening flushed file at 1730982700651 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b4f4e5b: reopening flushed file at 1730982700656 (+5 ms)Finished flush of dataSize ~43.62 KB/44668, heapSize ~54.96 KB/56280, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 112ms, sequenceid=121, compaction requested=false at 1730982700662 (+6 ms)Writing region close event to WAL at 1730982700664 (+2 ms)Closed at 1730982700664 2024-11-07T12:31:40,664 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:40,664 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:40,664 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:40,664 INFO [sync.3 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:40,665 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:31:40,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40411 is added to blk_1073741830_1006 (size=53065) 2024-11-07T12:31:40,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741830_1006 (size=53065) 2024-11-07T12:31:40,667 INFO [M:0;87fb68174c52:42317 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-07T12:31:40,667 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-07T12:31:40,667 INFO [M:0;87fb68174c52:42317 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42317 2024-11-07T12:31:40,667 INFO [M:0;87fb68174c52:42317 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:31:40,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:31:40,769 INFO [M:0;87fb68174c52:42317 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:31:40,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42317-0x1001a39275d0000, quorum=127.0.0.1:64652, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:31:40,772 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@666a2e8d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:31:40,772 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3659c62{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:31:40,772 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:31:40,772 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@144fb880{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:31:40,772 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ff483aa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/hadoop.log.dir/,STOPPED} 2024-11-07T12:31:40,774 WARN [BP-653850386-172.17.0.3-1730982648392 heartbeating to localhost/127.0.0.1:41511 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:31:40,774 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-07T12:31:40,774 WARN [BP-653850386-172.17.0.3-1730982648392 heartbeating to localhost/127.0.0.1:41511 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-653850386-172.17.0.3-1730982648392 (Datanode Uuid 534d355c-5058-4cd9-9eb5-2b9a4c7ee40a) service to localhost/127.0.0.1:41511 2024-11-07T12:31:40,774 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:31:40,774 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/cluster_ad3f8a98-eca3-eea8-a4c3-a7c29f7fac93/data/data3/current/BP-653850386-172.17.0.3-1730982648392 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:31:40,775 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/cluster_ad3f8a98-eca3-eea8-a4c3-a7c29f7fac93/data/data4/current/BP-653850386-172.17.0.3-1730982648392 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:31:40,775 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:31:40,777 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4732430a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:31:40,777 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5206174e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:31:40,777 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:31:40,777 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ac52070{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:31:40,777 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1592e8d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/hadoop.log.dir/,STOPPED} 2024-11-07T12:31:40,779 WARN [BP-653850386-172.17.0.3-1730982648392 heartbeating to localhost/127.0.0.1:41511 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:31:40,779 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-07T12:31:40,779 WARN [BP-653850386-172.17.0.3-1730982648392 heartbeating to localhost/127.0.0.1:41511 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-653850386-172.17.0.3-1730982648392 (Datanode Uuid 5aa9d217-6f94-40ed-a24d-67fdf05694b1) service to localhost/127.0.0.1:41511 2024-11-07T12:31:40,779 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:31:40,779 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/cluster_ad3f8a98-eca3-eea8-a4c3-a7c29f7fac93/data/data1/current/BP-653850386-172.17.0.3-1730982648392 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:31:40,780 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/cluster_ad3f8a98-eca3-eea8-a4c3-a7c29f7fac93/data/data2/current/BP-653850386-172.17.0.3-1730982648392 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:31:40,780 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:31:40,786 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6df6a7f2{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:31:40,786 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@520bb90c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:31:40,786 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:31:40,787 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35dc38db{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:31:40,787 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e11e1e0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/hadoop.log.dir/,STOPPED} 2024-11-07T12:31:40,793 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-07T12:31:40,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-07T12:31:40,818 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=206 (was 180) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41511 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41511 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41511 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/87fb68174c52:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41511 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41511 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41511 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:41511 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41511 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=29 (was 53), ProcessCount=11 (was 11), AvailableMemoryMB=6381 (was 6411) 2024-11-07T12:31:40,827 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=206, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=29, ProcessCount=11, AvailableMemoryMB=6381 2024-11-07T12:31:40,827 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-07T12:31:40,827 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/hadoop.log.dir so I do NOT create it in target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f 2024-11-07T12:31:40,827 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb467c0-25c2-7ae6-91b3-9c80cdab51c4/hadoop.tmp.dir so I do NOT create it in target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f 2024-11-07T12:31:40,827 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/cluster_ffcb906b-300d-d03c-c0f8-691bbb129366, deleteOnExit=true 2024-11-07T12:31:40,827 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-07T12:31:40,827 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/test.cache.data in system properties and HBase conf 2024-11-07T12:31:40,827 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/hadoop.tmp.dir in system properties and HBase conf 2024-11-07T12:31:40,827 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/hadoop.log.dir in system properties and HBase conf 2024-11-07T12:31:40,828 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-07T12:31:40,828 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-07T12:31:40,828 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-07T12:31:40,828 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-07T12:31:40,828 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:31:40,828 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:31:40,828 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-07T12:31:40,828 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:31:40,828 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-07T12:31:40,828 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-07T12:31:40,828 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:31:40,828 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:31:40,828 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-07T12:31:40,828 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/nfs.dump.dir in system properties and HBase conf 2024-11-07T12:31:40,828 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/java.io.tmpdir in system properties and HBase conf 2024-11-07T12:31:40,828 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:31:40,828 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-07T12:31:40,829 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-07T12:31:40,841 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:31:40,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:40,895 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:31:40,898 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:31:40,900 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:31:40,900 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:31:40,900 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:31:40,900 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:31:40,901 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c5b7d7a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:31:40,901 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b7f954{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:31:41,013 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2dd2b381{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/java.io.tmpdir/jetty-localhost-40127-hadoop-hdfs-3_4_1-tests_jar-_-any-1508176463295858315/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:31:41,014 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@69826858{HTTP/1.1, (http/1.1)}{localhost:40127} 2024-11-07T12:31:41,014 INFO [Time-limited test {}] server.Server(415): Started @239686ms 2024-11-07T12:31:41,026 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:31:41,058 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:41,090 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:31:41,092 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:31:41,093 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:31:41,093 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:31:41,093 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:31:41,095 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f8818bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:31:41,095 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75b3fca0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:31:41,208 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@314e7370{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/java.io.tmpdir/jetty-localhost-37193-hadoop-hdfs-3_4_1-tests_jar-_-any-904519663998191337/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:31:41,208 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@45bda0cb{HTTP/1.1, (http/1.1)}{localhost:37193} 2024-11-07T12:31:41,208 INFO [Time-limited test {}] server.Server(415): Started @239880ms 2024-11-07T12:31:41,209 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:31:41,237 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:31:41,239 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:31:41,240 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:31:41,240 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:31:41,240 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:31:41,241 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@34accf12{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:31:41,241 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@79ecb530{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:31:41,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:41,290 WARN [Thread-1958 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/cluster_ffcb906b-300d-d03c-c0f8-691bbb129366/data/data1/current/BP-1980521643-172.17.0.3-1730982700847/current, will proceed with Du for space computation calculation, 2024-11-07T12:31:41,290 WARN [Thread-1959 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/cluster_ffcb906b-300d-d03c-c0f8-691bbb129366/data/data2/current/BP-1980521643-172.17.0.3-1730982700847/current, will proceed with Du for space computation calculation, 2024-11-07T12:31:41,311 WARN [Thread-1937 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-07T12:31:41,314 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcfa4e03fcdacbf87 with lease ID 0x292fff018e7f6e80: Processing first storage report for DS-4270387f-6679-4f80-aa10-0665ea71e04f from datanode DatanodeRegistration(127.0.0.1:45435, datanodeUuid=b421e928-a962-4517-9bda-a783f78d935c, infoPort=46835, infoSecurePort=0, ipcPort=39337, storageInfo=lv=-57;cid=testClusterID;nsid=1927848737;c=1730982700847) 2024-11-07T12:31:41,314 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcfa4e03fcdacbf87 with lease ID 0x292fff018e7f6e80: from storage DS-4270387f-6679-4f80-aa10-0665ea71e04f node DatanodeRegistration(127.0.0.1:45435, datanodeUuid=b421e928-a962-4517-9bda-a783f78d935c, infoPort=46835, infoSecurePort=0, ipcPort=39337, storageInfo=lv=-57;cid=testClusterID;nsid=1927848737;c=1730982700847), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:31:41,314 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcfa4e03fcdacbf87 with lease ID 0x292fff018e7f6e80: Processing first storage report for DS-6d44f105-ff56-4868-967d-8ea27af1a298 from datanode DatanodeRegistration(127.0.0.1:45435, datanodeUuid=b421e928-a962-4517-9bda-a783f78d935c, infoPort=46835, infoSecurePort=0, ipcPort=39337, storageInfo=lv=-57;cid=testClusterID;nsid=1927848737;c=1730982700847) 2024-11-07T12:31:41,314 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcfa4e03fcdacbf87 with lease ID 0x292fff018e7f6e80: from storage DS-6d44f105-ff56-4868-967d-8ea27af1a298 node DatanodeRegistration(127.0.0.1:45435, datanodeUuid=b421e928-a962-4517-9bda-a783f78d935c, infoPort=46835, infoSecurePort=0, ipcPort=39337, storageInfo=lv=-57;cid=testClusterID;nsid=1927848737;c=1730982700847), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:31:41,345 INFO [regionserver/87fb68174c52:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:31:41,357 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@6f53b1c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/java.io.tmpdir/jetty-localhost-43171-hadoop-hdfs-3_4_1-tests_jar-_-any-11285529735367868426/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:31:41,357 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@550f8afc{HTTP/1.1, (http/1.1)}{localhost:43171} 2024-11-07T12:31:41,358 INFO [Time-limited test {}] server.Server(415): Started @240030ms 2024-11-07T12:31:41,359 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:31:41,438 WARN [Thread-1984 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/cluster_ffcb906b-300d-d03c-c0f8-691bbb129366/data/data3/current/BP-1980521643-172.17.0.3-1730982700847/current, will proceed with Du for space computation calculation, 2024-11-07T12:31:41,439 WARN [Thread-1985 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/cluster_ffcb906b-300d-d03c-c0f8-691bbb129366/data/data4/current/BP-1980521643-172.17.0.3-1730982700847/current, will proceed with Du for space computation calculation, 2024-11-07T12:31:41,454 WARN [Thread-1973 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:31:41,456 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfb7f9eea8ce4fb43 with lease ID 0x292fff018e7f6e81: Processing first storage report for DS-4459c04f-6b35-4a52-b342-33abf17b9032 from datanode DatanodeRegistration(127.0.0.1:41829, datanodeUuid=b243b6f2-cfdc-4c7b-a362-fde95abf6bd2, infoPort=37931, infoSecurePort=0, ipcPort=37009, storageInfo=lv=-57;cid=testClusterID;nsid=1927848737;c=1730982700847) 2024-11-07T12:31:41,456 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfb7f9eea8ce4fb43 with lease ID 0x292fff018e7f6e81: from storage DS-4459c04f-6b35-4a52-b342-33abf17b9032 node DatanodeRegistration(127.0.0.1:41829, datanodeUuid=b243b6f2-cfdc-4c7b-a362-fde95abf6bd2, infoPort=37931, infoSecurePort=0, ipcPort=37009, storageInfo=lv=-57;cid=testClusterID;nsid=1927848737;c=1730982700847), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-07T12:31:41,456 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfb7f9eea8ce4fb43 with lease ID 0x292fff018e7f6e81: Processing first storage report for DS-5c185dcc-ab4d-4104-bf2e-a9b33b8245f9 from datanode DatanodeRegistration(127.0.0.1:41829, datanodeUuid=b243b6f2-cfdc-4c7b-a362-fde95abf6bd2, infoPort=37931, infoSecurePort=0, ipcPort=37009, storageInfo=lv=-57;cid=testClusterID;nsid=1927848737;c=1730982700847) 2024-11-07T12:31:41,456 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfb7f9eea8ce4fb43 with lease ID 0x292fff018e7f6e81: from storage DS-5c185dcc-ab4d-4104-bf2e-a9b33b8245f9 node DatanodeRegistration(127.0.0.1:41829, datanodeUuid=b243b6f2-cfdc-4c7b-a362-fde95abf6bd2, infoPort=37931, infoSecurePort=0, ipcPort=37009, storageInfo=lv=-57;cid=testClusterID;nsid=1927848737;c=1730982700847), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:31:41,479 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f 2024-11-07T12:31:41,483 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/cluster_ffcb906b-300d-d03c-c0f8-691bbb129366/zookeeper_0, clientPort=62282, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/cluster_ffcb906b-300d-d03c-c0f8-691bbb129366/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/cluster_ffcb906b-300d-d03c-c0f8-691bbb129366/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-07T12:31:41,484 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62282 2024-11-07T12:31:41,484 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:31:41,485 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:31:41,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:31:41,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:31:41,495 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a with version=8 2024-11-07T12:31:41,495 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/hbase-staging 2024-11-07T12:31:41,497 INFO [Time-limited test {}] client.ConnectionUtils(128): master/87fb68174c52:0 server-side Connection retries=45 2024-11-07T12:31:41,497 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:31:41,497 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:31:41,497 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:31:41,497 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:31:41,497 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:31:41,497 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-07T12:31:41,497 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:31:41,498 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:37673 2024-11-07T12:31:41,499 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37673 connecting to ZooKeeper ensemble=127.0.0.1:62282 2024-11-07T12:31:41,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:376730x0, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:31:41,506 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37673-0x1001a39f4470000 connected 2024-11-07T12:31:41,524 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:31:41,525 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:31:41,528 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:31:41,528 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a, hbase.cluster.distributed=false 2024-11-07T12:31:41,529 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:31:41,531 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37673 2024-11-07T12:31:41,532 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37673 2024-11-07T12:31:41,532 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37673 2024-11-07T12:31:41,535 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37673 2024-11-07T12:31:41,535 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37673 2024-11-07T12:31:41,549 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/87fb68174c52:0 server-side Connection retries=45 2024-11-07T12:31:41,549 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:31:41,550 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:31:41,550 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:31:41,550 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:31:41,550 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:31:41,550 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T12:31:41,550 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:31:41,550 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33325 2024-11-07T12:31:41,552 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33325 connecting to ZooKeeper ensemble=127.0.0.1:62282 2024-11-07T12:31:41,552 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:31:41,554 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:31:41,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:333250x0, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:31:41,558 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33325-0x1001a39f4470001 connected 2024-11-07T12:31:41,558 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33325-0x1001a39f4470001, quorum=127.0.0.1:62282, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:31:41,558 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-07T12:31:41,558 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-07T12:31:41,559 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33325-0x1001a39f4470001, quorum=127.0.0.1:62282, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T12:31:41,560 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33325-0x1001a39f4470001, quorum=127.0.0.1:62282, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:31:41,560 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33325 2024-11-07T12:31:41,560 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33325 2024-11-07T12:31:41,561 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33325 2024-11-07T12:31:41,561 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33325 2024-11-07T12:31:41,561 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33325 2024-11-07T12:31:41,572 DEBUG [M:0;87fb68174c52:37673 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;87fb68174c52:37673 2024-11-07T12:31:41,572 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/87fb68174c52,37673,1730982701497 2024-11-07T12:31:41,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33325-0x1001a39f4470001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:31:41,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:31:41,574 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/87fb68174c52,37673,1730982701497 2024-11-07T12:31:41,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:31:41,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33325-0x1001a39f4470001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T12:31:41,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33325-0x1001a39f4470001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:31:41,576 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T12:31:41,577 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/87fb68174c52,37673,1730982701497 from backup master directory 2024-11-07T12:31:41,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/87fb68174c52,37673,1730982701497 2024-11-07T12:31:41,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:31:41,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33325-0x1001a39f4470001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:31:41,578 WARN [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
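The ZKUtil(113) records above show the minicluster registering watches on znodes such as /hbase/running, /hbase/acl and /hbase/master before those znodes exist, and the ZKWatcher(609) records show the NodeCreated and NodeChildrenChanged events that arrive once they are created. A minimal sketch of that existence-watch pattern with the plain ZooKeeper client follows; the quorum address, timeout and class name are illustrative placeholders, not values or code from this run, and waiting for the SyncConnected event plus error handling are omitted for brevity.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZkExistsWatchSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical quorum address; the test above uses an ephemeral MiniZooKeeperCluster port.
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("ZK event: " + event.getType() + " on " + event.getPath());
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, watcher);
        // exists() returns null for an absent znode but still registers the watch,
        // so a later create of /hbase/running fires a NodeCreated event on the watcher.
        Stat stat = zk.exists("/hbase/running", watcher);
        System.out.println("/hbase/running " + (stat == null ? "not created yet" : "exists"));
        zk.close();
      }
    }

ZooKeeper watches are one-shot, so code built on this pattern re-registers the watch after each notification; that is why the log shows repeated watcher registrations on the same znodes from both the master and the region server sessions.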
2024-11-07T12:31:41,578 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=87fb68174c52,37673,1730982701497 2024-11-07T12:31:41,582 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/hbase.id] with ID: 63105050-f57b-4bdd-a389-0eff3d1cdbdb 2024-11-07T12:31:41,582 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/.tmp/hbase.id 2024-11-07T12:31:41,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:31:41,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:31:41,588 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/.tmp/hbase.id]:[hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/hbase.id] 2024-11-07T12:31:41,598 INFO [master/87fb68174c52:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:31:41,598 INFO [master/87fb68174c52:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-07T12:31:41,599 INFO [master/87fb68174c52:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-07T12:31:41,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:31:41,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33325-0x1001a39f4470001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:31:41,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:31:41,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:31:41,607 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T12:31:41,608 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-07T12:31:41,608 INFO [master/87fb68174c52:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:31:41,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:31:41,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:31:41,615 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store 2024-11-07T12:31:41,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:31:41,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:31:41,620 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:31:41,621 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:31:41,621 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:31:41,621 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:31:41,621 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:31:41,621 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:31:41,621 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-07T12:31:41,621 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730982701621Disabling compacts and flushes for region at 1730982701621Disabling writes for close at 1730982701621Writing region close event to WAL at 1730982701621Closed at 1730982701621 2024-11-07T12:31:41,621 WARN [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store/.initializing 2024-11-07T12:31:41,622 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/WALs/87fb68174c52,37673,1730982701497 2024-11-07T12:31:41,624 INFO [master/87fb68174c52:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87fb68174c52%2C37673%2C1730982701497, suffix=, logDir=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/WALs/87fb68174c52,37673,1730982701497, archiveDir=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/oldWALs, maxLogs=10 2024-11-07T12:31:41,624 INFO [master/87fb68174c52:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C37673%2C1730982701497.1730982701624 2024-11-07T12:31:41,628 INFO [master/87fb68174c52:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/WALs/87fb68174c52,37673,1730982701497/87fb68174c52%2C37673%2C1730982701497.1730982701624 2024-11-07T12:31:41,628 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37931:37931),(127.0.0.1/127.0.0.1:46835:46835)] 2024-11-07T12:31:41,629 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:31:41,629 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:31:41,629 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:31:41,629 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:31:41,630 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:31:41,631 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-07T12:31:41,631 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:31:41,632 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:31:41,632 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:31:41,632 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-07T12:31:41,632 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:31:41,633 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:31:41,633 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:31:41,634 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-07T12:31:41,634 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:31:41,634 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:31:41,634 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:31:41,635 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-07T12:31:41,635 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:31:41,635 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:31:41,635 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:31:41,636 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:31:41,636 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:31:41,637 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:31:41,637 DEBUG [master/87fb68174c52:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:31:41,638 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-07T12:31:41,639 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:31:41,640 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:31:41,640 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=822390, jitterRate=0.04572412371635437}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-07T12:31:41,641 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1730982701629Initializing all the Stores at 1730982701630 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982701630Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982701630Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982701630Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982701630Cleaning up temporary data from old regions at 1730982701637 (+7 ms)Region opened successfully at 1730982701641 (+4 ms) 2024-11-07T12:31:41,641 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-07T12:31:41,644 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b2b5a1e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87fb68174c52/172.17.0.3:0 2024-11-07T12:31:41,644 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-07T12:31:41,644 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-07T12:31:41,645 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-07T12:31:41,645 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-07T12:31:41,645 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-07T12:31:41,645 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-07T12:31:41,645 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-07T12:31:41,647 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-07T12:31:41,648 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-07T12:31:41,649 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-07T12:31:41,649 INFO [master/87fb68174c52:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-07T12:31:41,650 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-07T12:31:41,651 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-07T12:31:41,651 INFO [master/87fb68174c52:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-07T12:31:41,652 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-07T12:31:41,653 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-07T12:31:41,654 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-07T12:31:41,655 DEBUG 
[master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-07T12:31:41,657 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-07T12:31:41,658 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-07T12:31:41,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:31:41,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33325-0x1001a39f4470001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:31:41,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:31:41,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33325-0x1001a39f4470001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:31:41,660 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=87fb68174c52,37673,1730982701497, sessionid=0x1001a39f4470000, setting cluster-up flag (Was=false) 2024-11-07T12:31:41,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:31:41,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33325-0x1001a39f4470001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:31:41,667 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-07T12:31:41,667 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87fb68174c52,37673,1730982701497 2024-11-07T12:31:41,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:31:41,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33325-0x1001a39f4470001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:31:41,676 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-07T12:31:41,676 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87fb68174c52,37673,1730982701497 2024-11-07T12:31:41,678 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-07T12:31:41,679 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-07T12:31:41,679 INFO [master/87fb68174c52:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-07T12:31:41,679 INFO [master/87fb68174c52:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-07T12:31:41,680 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 87fb68174c52,37673,1730982701497 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-07T12:31:41,681 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:31:41,681 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:31:41,681 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:31:41,681 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:31:41,681 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/87fb68174c52:0, corePoolSize=10, maxPoolSize=10 2024-11-07T12:31:41,681 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:31:41,681 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/87fb68174c52:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:31:41,681 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/87fb68174c52:0, corePoolSize=1, 
maxPoolSize=1 2024-11-07T12:31:41,682 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1730982731682 2024-11-07T12:31:41,682 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-07T12:31:41,682 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-07T12:31:41,682 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-07T12:31:41,682 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:31:41,682 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-07T12:31:41,683 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-07T12:31:41,683 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-07T12:31:41,683 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-07T12:31:41,683 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-07T12:31:41,683 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-07T12:31:41,683 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-07T12:31:41,683 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-07T12:31:41,683 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:31:41,683 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-07T12:31:41,684 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-07T12:31:41,684 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-07T12:31:41,684 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.large.0-1730982701684,5,FailOnTimeoutGroup] 2024-11-07T12:31:41,684 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.small.0-1730982701684,5,FailOnTimeoutGroup] 2024-11-07T12:31:41,684 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T12:31:41,684 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-07T12:31:41,684 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-07T12:31:41,684 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-07T12:31:41,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:31:41,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:31:41,692 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-07T12:31:41,692 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a 2024-11-07T12:31:41,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741832_1008 (size=32) 2024-11-07T12:31:41,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741832_1008 (size=32) 2024-11-07T12:31:41,698 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:31:41,700 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:31:41,701 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:31:41,701 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:31:41,701 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:31:41,701 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:31:41,702 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:31:41,702 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:31:41,703 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:31:41,703 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:31:41,704 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:31:41,704 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:31:41,704 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:31:41,704 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:31:41,705 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:31:41,705 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:31:41,705 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:31:41,706 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:31:41,706 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740 2024-11-07T12:31:41,706 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740 2024-11-07T12:31:41,707 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:31:41,707 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:31:41,708 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T12:31:41,709 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:31:41,710 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:31:41,711 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=695308, jitterRate=-0.11587083339691162}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:31:41,711 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1730982701699Initializing all the Stores at 1730982701699Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982701699Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982701699Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982701699Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982701699Cleaning up temporary data from old regions at 1730982701707 (+8 ms)Region opened successfully at 1730982701711 (+4 ms) 2024-11-07T12:31:41,712 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:31:41,712 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:31:41,712 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:31:41,712 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:31:41,712 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:31:41,712 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:31:41,712 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730982701711Disabling compacts and flushes for region at 
1730982701711Disabling writes for close at 1730982701712 (+1 ms)Writing region close event to WAL at 1730982701712Closed at 1730982701712 2024-11-07T12:31:41,713 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:31:41,713 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-07T12:31:41,714 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-07T12:31:41,715 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:31:41,716 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-07T12:31:41,763 INFO [RS:0;87fb68174c52:33325 {}] regionserver.HRegionServer(746): ClusterId : 63105050-f57b-4bdd-a389-0eff3d1cdbdb 2024-11-07T12:31:41,763 DEBUG [RS:0;87fb68174c52:33325 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-07T12:31:41,765 DEBUG [RS:0;87fb68174c52:33325 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-07T12:31:41,765 DEBUG [RS:0;87fb68174c52:33325 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-07T12:31:41,767 DEBUG [RS:0;87fb68174c52:33325 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-07T12:31:41,767 DEBUG [RS:0;87fb68174c52:33325 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38c51de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87fb68174c52/172.17.0.3:0 2024-11-07T12:31:41,781 DEBUG [RS:0;87fb68174c52:33325 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;87fb68174c52:33325 2024-11-07T12:31:41,781 INFO [RS:0;87fb68174c52:33325 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-07T12:31:41,781 INFO [RS:0;87fb68174c52:33325 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-07T12:31:41,781 DEBUG [RS:0;87fb68174c52:33325 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-07T12:31:41,781 INFO [RS:0;87fb68174c52:33325 {}] regionserver.HRegionServer(2659): reportForDuty to master=87fb68174c52,37673,1730982701497 with port=33325, startcode=1730982701549 2024-11-07T12:31:41,781 DEBUG [RS:0;87fb68174c52:33325 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-07T12:31:41,783 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59017, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-07T12:31:41,784 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37673 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 87fb68174c52,33325,1730982701549 2024-11-07T12:31:41,784 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37673 {}] master.ServerManager(517): Registering regionserver=87fb68174c52,33325,1730982701549 2024-11-07T12:31:41,785 DEBUG [RS:0;87fb68174c52:33325 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a 2024-11-07T12:31:41,785 DEBUG [RS:0;87fb68174c52:33325 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35145 2024-11-07T12:31:41,785 DEBUG [RS:0;87fb68174c52:33325 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-07T12:31:41,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:31:41,787 DEBUG [RS:0;87fb68174c52:33325 {}] zookeeper.ZKUtil(111): regionserver:33325-0x1001a39f4470001, quorum=127.0.0.1:62282, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/87fb68174c52,33325,1730982701549 2024-11-07T12:31:41,787 WARN [RS:0;87fb68174c52:33325 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-07T12:31:41,787 INFO [RS:0;87fb68174c52:33325 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:31:41,788 DEBUG [RS:0;87fb68174c52:33325 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/WALs/87fb68174c52,33325,1730982701549 2024-11-07T12:31:41,788 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [87fb68174c52,33325,1730982701549] 2024-11-07T12:31:41,790 INFO [RS:0;87fb68174c52:33325 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-07T12:31:41,792 INFO [RS:0;87fb68174c52:33325 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-07T12:31:41,792 INFO [RS:0;87fb68174c52:33325 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-07T12:31:41,792 INFO [RS:0;87fb68174c52:33325 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-07T12:31:41,793 INFO [RS:0;87fb68174c52:33325 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-07T12:31:41,793 INFO [RS:0;87fb68174c52:33325 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-07T12:31:41,793 INFO [RS:0;87fb68174c52:33325 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-07T12:31:41,794 DEBUG [RS:0;87fb68174c52:33325 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:31:41,794 DEBUG [RS:0;87fb68174c52:33325 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:31:41,794 DEBUG [RS:0;87fb68174c52:33325 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:31:41,794 DEBUG [RS:0;87fb68174c52:33325 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:31:41,794 DEBUG [RS:0;87fb68174c52:33325 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:31:41,794 DEBUG [RS:0;87fb68174c52:33325 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/87fb68174c52:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:31:41,794 DEBUG [RS:0;87fb68174c52:33325 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:31:41,794 DEBUG [RS:0;87fb68174c52:33325 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:31:41,794 DEBUG [RS:0;87fb68174c52:33325 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:31:41,794 DEBUG [RS:0;87fb68174c52:33325 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:31:41,794 DEBUG [RS:0;87fb68174c52:33325 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:31:41,794 DEBUG [RS:0;87fb68174c52:33325 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:31:41,794 DEBUG [RS:0;87fb68174c52:33325 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/87fb68174c52:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:31:41,794 DEBUG [RS:0;87fb68174c52:33325 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:31:41,794 INFO [RS:0;87fb68174c52:33325 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
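The executor pools and chores listed above are periodic maintenance tasks with fixed periods (for example CompactionChecker at 1000 ms). As a rough equivalent of what ChoreService does for a single chore, here is a plain ScheduledExecutorService sketch; the task body is a placeholder, not HBase's CompactionChecker.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
        public static void main(String[] args) {
            ScheduledExecutorService chorePool = Executors.newScheduledThreadPool(1);
            // Period taken from the log: CompactionChecker, period=1000, unit=MILLISECONDS.
            chorePool.scheduleAtFixedRate(
                () -> System.out.println("check stores for compaction candidates"),
                1000, 1000, TimeUnit.MILLISECONDS);
        }
    }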
2024-11-07T12:31:41,794 INFO [RS:0;87fb68174c52:33325 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:31:41,794 INFO [RS:0;87fb68174c52:33325 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:31:41,794 INFO [RS:0;87fb68174c52:33325 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-07T12:31:41,794 INFO [RS:0;87fb68174c52:33325 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-07T12:31:41,794 INFO [RS:0;87fb68174c52:33325 {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,33325,1730982701549-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:31:41,809 INFO [RS:0;87fb68174c52:33325 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-07T12:31:41,809 INFO [RS:0;87fb68174c52:33325 {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,33325,1730982701549-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:31:41,809 INFO [RS:0;87fb68174c52:33325 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:31:41,809 INFO [RS:0;87fb68174c52:33325 {}] regionserver.Replication(171): 87fb68174c52,33325,1730982701549 started 2024-11-07T12:31:41,823 INFO [RS:0;87fb68174c52:33325 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:31:41,823 INFO [RS:0;87fb68174c52:33325 {}] regionserver.HRegionServer(1482): Serving as 87fb68174c52,33325,1730982701549, RpcServer on 87fb68174c52/172.17.0.3:33325, sessionid=0x1001a39f4470001 2024-11-07T12:31:41,823 DEBUG [RS:0;87fb68174c52:33325 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-07T12:31:41,823 DEBUG [RS:0;87fb68174c52:33325 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 87fb68174c52,33325,1730982701549 2024-11-07T12:31:41,823 DEBUG [RS:0;87fb68174c52:33325 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87fb68174c52,33325,1730982701549' 2024-11-07T12:31:41,823 DEBUG [RS:0;87fb68174c52:33325 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-07T12:31:41,824 DEBUG [RS:0;87fb68174c52:33325 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-07T12:31:41,824 DEBUG [RS:0;87fb68174c52:33325 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-07T12:31:41,824 DEBUG [RS:0;87fb68174c52:33325 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-07T12:31:41,824 DEBUG [RS:0;87fb68174c52:33325 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 87fb68174c52,33325,1730982701549 2024-11-07T12:31:41,824 DEBUG [RS:0;87fb68174c52:33325 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87fb68174c52,33325,1730982701549' 2024-11-07T12:31:41,824 DEBUG [RS:0;87fb68174c52:33325 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-07T12:31:41,824 DEBUG 
[RS:0;87fb68174c52:33325 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-07T12:31:41,825 DEBUG [RS:0;87fb68174c52:33325 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-07T12:31:41,825 INFO [RS:0;87fb68174c52:33325 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-07T12:31:41,825 INFO [RS:0;87fb68174c52:33325 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-07T12:31:41,866 WARN [87fb68174c52:37673 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-07T12:31:41,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:41,927 INFO [RS:0;87fb68174c52:33325 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87fb68174c52%2C33325%2C1730982701549, suffix=, logDir=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/WALs/87fb68174c52,33325,1730982701549, archiveDir=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/oldWALs, maxLogs=32 2024-11-07T12:31:41,927 INFO [RS:0;87fb68174c52:33325 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C33325%2C1730982701549.1730982701927 2024-11-07T12:31:41,932 INFO [RS:0;87fb68174c52:33325 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/WALs/87fb68174c52,33325,1730982701549/87fb68174c52%2C33325%2C1730982701549.1730982701927 2024-11-07T12:31:41,933 DEBUG [RS:0;87fb68174c52:33325 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46835:46835),(127.0.0.1/127.0.0.1:37931:37931)] 2024-11-07T12:31:42,058 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:42,117 DEBUG [87fb68174c52:37673 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-07T12:31:42,117 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=87fb68174c52,33325,1730982701549 2024-11-07T12:31:42,118 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87fb68174c52,33325,1730982701549, state=OPENING 2024-11-07T12:31:42,120 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-07T12:31:42,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:31:42,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33325-0x1001a39f4470001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:31:42,122 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:31:42,122 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:31:42,122 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:31:42,122 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=87fb68174c52,33325,1730982701549}] 2024-11-07T12:31:42,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:42,275 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-07T12:31:42,277 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44573, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-07T12:31:42,280 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-07T12:31:42,280 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:31:42,281 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87fb68174c52%2C33325%2C1730982701549.meta, suffix=.meta, logDir=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/WALs/87fb68174c52,33325,1730982701549, archiveDir=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/oldWALs, maxLogs=32 2024-11-07T12:31:42,282 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C33325%2C1730982701549.meta.1730982702282.meta 2024-11-07T12:31:42,288 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/WALs/87fb68174c52,33325,1730982701549/87fb68174c52%2C33325%2C1730982701549.meta.1730982702282.meta 2024-11-07T12:31:42,296 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37931:37931),(127.0.0.1/127.0.0.1:46835:46835)] 2024-11-07T12:31:42,303 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:31:42,303 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-07T12:31:42,303 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-07T12:31:42,303 
INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-07T12:31:42,303 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-07T12:31:42,303 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:31:42,303 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-07T12:31:42,303 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-07T12:31:42,305 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:31:42,306 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:31:42,306 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:31:42,307 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:31:42,307 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:31:42,307 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:31:42,307 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:31:42,308 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:31:42,308 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:31:42,308 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:31:42,308 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:31:42,309 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:31:42,309 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:31:42,309 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:31:42,309 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:31:42,309 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:31:42,310 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:31:42,310 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740 2024-11-07T12:31:42,311 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740 2024-11-07T12:31:42,312 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:31:42,312 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:31:42,313 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T12:31:42,314 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:31:42,315 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=718816, jitterRate=-0.08597885072231293}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:31:42,315 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-07T12:31:42,315 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1730982702303Writing region info on filesystem at 1730982702303Initializing all the Stores at 1730982702304 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982702304Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982702305 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982702305Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982702305Cleaning up temporary data from old regions at 1730982702312 (+7 ms)Running coprocessor post-open hooks at 1730982702315 (+3 ms)Region opened successfully at 1730982702315 2024-11-07T12:31:42,316 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1730982702274 2024-11-07T12:31:42,318 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-07T12:31:42,318 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-07T12:31:42,319 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=87fb68174c52,33325,1730982701549 2024-11-07T12:31:42,320 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87fb68174c52,33325,1730982701549, state=OPEN 2024-11-07T12:31:42,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:31:42,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33325-0x1001a39f4470001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:31:42,324 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=87fb68174c52,33325,1730982701549 2024-11-07T12:31:42,324 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:31:42,324 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:31:42,326 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-07T12:31:42,326 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=87fb68174c52,33325,1730982701549 in 202 msec 2024-11-07T12:31:42,329 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-07T12:31:42,329 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 612 msec 2024-11-07T12:31:42,330 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:31:42,330 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-07T12:31:42,331 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:31:42,331 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87fb68174c52,33325,1730982701549, seqNum=-1] 2024-11-07T12:31:42,331 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:31:42,333 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37223, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:31:42,337 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 658 msec 2024-11-07T12:31:42,338 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1730982702337, completionTime=-1 2024-11-07T12:31:42,338 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-07T12:31:42,338 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-07T12:31:42,339 INFO [master/87fb68174c52:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-07T12:31:42,339 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1730982762339 2024-11-07T12:31:42,339 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1730982822339 2024-11-07T12:31:42,339 INFO [master/87fb68174c52:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-07T12:31:42,340 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,37673,1730982701497-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:31:42,340 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,37673,1730982701497-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:31:42,340 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,37673,1730982701497-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
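Before declaring initialization complete, the master waits until at least the configured minimum number of RegionServers has reported in (here "count=1; waited=0ms, expected min=1 server(s)"). The sketch below shows the generic bounded-wait pattern involved; the class, method, and field names are invented for illustration and do not come from ServerManager.

    import java.util.concurrent.atomic.AtomicInteger;

    public class WaitForServersSketch {
        private final AtomicInteger registeredServers = new AtomicInteger();

        // Blocks until minServers have registered or timeoutMs elapses.
        synchronized boolean waitForMinServers(int minServers, long timeoutMs) throws InterruptedException {
            long deadline = System.currentTimeMillis() + timeoutMs;
            while (registeredServers.get() < minServers) {
                long remaining = deadline - System.currentTimeMillis();
                if (remaining <= 0) {
                    return false; // timed out before enough servers reported in
                }
                wait(remaining);
            }
            return true;
        }

        // Called when a RegionServer registers (the log's "Registering regionserver=...").
        synchronized void onServerRegistered() {
            registeredServers.incrementAndGet();
            notifyAll();
        }
    }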
2024-11-07T12:31:42,340 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-87fb68174c52:37673, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:31:42,340 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-07T12:31:42,340 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-07T12:31:42,341 DEBUG [master/87fb68174c52:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-07T12:31:42,343 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.765sec 2024-11-07T12:31:42,343 INFO [master/87fb68174c52:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-07T12:31:42,343 INFO [master/87fb68174c52:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-07T12:31:42,343 INFO [master/87fb68174c52:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-07T12:31:42,343 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-07T12:31:42,343 INFO [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-07T12:31:42,343 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,37673,1730982701497-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:31:42,343 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,37673,1730982701497-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-07T12:31:42,345 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-07T12:31:42,345 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-07T12:31:42,345 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,37673,1730982701497-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
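The master-side chores above are scheduled with mixed units (milliseconds for BalancerChore and OldWALsDirSizeChore, seconds for the Mob chores and RollingUpgradeChore). When comparing them it helps to normalize to one unit; a trivial sketch using java.util.concurrent.TimeUnit follows, with the periods copied from the log.

    import java.util.concurrent.TimeUnit;

    public class ChorePeriodSketch {
        public static void main(String[] args) {
            // From the log: MobFileCleanerChore period=86400 SECONDS, MobFileCompactionChore period=604800 SECONDS,
            // OldWALsDirSizeChore period=300000 MILLISECONDS.
            System.out.println(TimeUnit.SECONDS.toMillis(86_400));        // 86,400,000 ms = 1 day
            System.out.println(TimeUnit.SECONDS.toDays(604_800));         // 7 days
            System.out.println(TimeUnit.MILLISECONDS.toMinutes(300_000)); // 5 minutes
        }
    }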
2024-11-07T12:31:42,363 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@774651ea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:31:42,363 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 87fb68174c52,37673,-1 for getting cluster id 2024-11-07T12:31:42,363 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-07T12:31:42,364 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '63105050-f57b-4bdd-a389-0eff3d1cdbdb' 2024-11-07T12:31:42,364 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-07T12:31:42,364 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "63105050-f57b-4bdd-a389-0eff3d1cdbdb" 2024-11-07T12:31:42,365 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@217420d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:31:42,365 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [87fb68174c52,37673,-1] 2024-11-07T12:31:42,365 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-07T12:31:42,365 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:31:42,366 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60402, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-07T12:31:42,367 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42c5c09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:31:42,367 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:31:42,368 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87fb68174c52,33325,1730982701549, seqNum=-1] 2024-11-07T12:31:42,368 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:31:42,369 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39846, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:31:42,371 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=87fb68174c52,37673,1730982701497 2024-11-07T12:31:42,371 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:31:42,374 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-07T12:31:42,374 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-07T12:31:42,375 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 87fb68174c52,37673,1730982701497 2024-11-07T12:31:42,375 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7bd08b43 2024-11-07T12:31:42,375 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T12:31:42,376 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60410, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T12:31:42,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37673 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-07T12:31:42,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37673 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-07T12:31:42,377 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37673 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T12:31:42,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37673 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-07T12:31:42,379 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T12:31:42,379 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:31:42,379 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37673 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-07T12:31:42,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-07T12:31:42,380 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T12:31:42,387 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741835_1011 (size=381) 2024-11-07T12:31:42,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741835_1011 (size=381) 2024-11-07T12:31:42,389 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ae2d13e7df9673fc8ab06b452d0d84b0, NAME => 'TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a 2024-11-07T12:31:42,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741836_1012 (size=64) 2024-11-07T12:31:42,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741836_1012 (size=64) 2024-11-07T12:31:42,395 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:31:42,395 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing ae2d13e7df9673fc8ab06b452d0d84b0, disabling compactions & flushes 2024-11-07T12:31:42,395 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0. 2024-11-07T12:31:42,395 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0. 2024-11-07T12:31:42,395 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0. after waiting 0 ms 2024-11-07T12:31:42,395 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0. 2024-11-07T12:31:42,395 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0. 
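The create-table request above ('TestLogRolling-testLogRolling' with a single 'info' family, VERSIONS=1, BLOOMFILTER=ROW, BLOCKSIZE=64 KB) is issued by the test through the client API; the test code itself is not part of this log, so the snippet below is only a reconstruction of roughly that call using the public HBase client builders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Column family attributes mirror the descriptor printed in the log above.
                admin.createTable(TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder
                        .newBuilder(Bytes.toBytes("info"))
                        .setMaxVersions(1)
                        .setBloomFilterType(BloomType.ROW)
                        .setBlocksize(64 * 1024)
                        .build())
                    .build());
            }
        }
    }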
2024-11-07T12:31:42,395 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for ae2d13e7df9673fc8ab06b452d0d84b0: Waiting for close lock at 1730982702395Disabling compacts and flushes for region at 1730982702395Disabling writes for close at 1730982702395Writing region close event to WAL at 1730982702395Closed at 1730982702395
2024-11-07T12:31:42,396 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META
2024-11-07T12:31:42,397 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1730982702396"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730982702396"}]},"ts":"1730982702396"}
2024-11-07T12:31:42,399 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta.
2024-11-07T12:31:42,400 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-11-07T12:31:42,400 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730982702400"}]},"ts":"1730982702400"}
2024-11-07T12:31:42,402 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta
2024-11-07T12:31:42,402 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ae2d13e7df9673fc8ab06b452d0d84b0, ASSIGN}]
2024-11-07T12:31:42,403 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ae2d13e7df9673fc8ab06b452d0d84b0, ASSIGN
2024-11-07T12:31:42,404 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ae2d13e7df9673fc8ab06b452d0d84b0, ASSIGN; state=OFFLINE, location=87fb68174c52,33325,1730982701549; forceNewPlan=false, retain=false
2024-11-07T12:31:42,555 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ae2d13e7df9673fc8ab06b452d0d84b0, regionState=OPENING, regionLocation=87fb68174c52,33325,1730982701549
2024-11-07T12:31:42,557 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ae2d13e7df9673fc8ab06b452d0d84b0, ASSIGN because future has completed
2024-11-07T12:31:42,558 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ae2d13e7df9673fc8ab06b452d0d84b0, server=87fb68174c52,33325,1730982701549}]
2024-11-07T12:31:42,714 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0.
2024-11-07T12:31:42,714 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ae2d13e7df9673fc8ab06b452d0d84b0, NAME => 'TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0.', STARTKEY => '', ENDKEY => ''}
2024-11-07T12:31:42,714 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling ae2d13e7df9673fc8ab06b452d0d84b0
2024-11-07T12:31:42,714 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-07T12:31:42,714 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ae2d13e7df9673fc8ab06b452d0d84b0
2024-11-07T12:31:42,714 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ae2d13e7df9673fc8ab06b452d0d84b0
2024-11-07T12:31:42,715 INFO [StoreOpener-ae2d13e7df9673fc8ab06b452d0d84b0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ae2d13e7df9673fc8ab06b452d0d84b0
2024-11-07T12:31:42,717 INFO [StoreOpener-ae2d13e7df9673fc8ab06b452d0d84b0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ae2d13e7df9673fc8ab06b452d0d84b0 columnFamilyName info
2024-11-07T12:31:42,717 DEBUG [StoreOpener-ae2d13e7df9673fc8ab06b452d0d84b0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T12:31:42,717 INFO [StoreOpener-ae2d13e7df9673fc8ab06b452d0d84b0-1 {}] regionserver.HStore(327): Store=ae2d13e7df9673fc8ab06b452d0d84b0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-07T12:31:42,717 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ae2d13e7df9673fc8ab06b452d0d84b0
2024-11-07T12:31:42,718 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0
2024-11-07T12:31:42,718 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0
2024-11-07T12:31:42,719 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ae2d13e7df9673fc8ab06b452d0d84b0
2024-11-07T12:31:42,719 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ae2d13e7df9673fc8ab06b452d0d84b0
2024-11-07T12:31:42,720 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ae2d13e7df9673fc8ab06b452d0d84b0
2024-11-07T12:31:42,722 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-07T12:31:42,722 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ae2d13e7df9673fc8ab06b452d0d84b0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=805220, jitterRate=0.023890331387519836}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-11-07T12:31:42,722 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ae2d13e7df9673fc8ab06b452d0d84b0
2024-11-07T12:31:42,723 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ae2d13e7df9673fc8ab06b452d0d84b0: Running coprocessor pre-open hook at 1730982702714Writing region info on filesystem at 1730982702714Initializing all the Stores at 1730982702715 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982702715Cleaning up temporary data from old regions at 1730982702719 (+4 ms)Running coprocessor post-open hooks at 1730982702722 (+3 ms)Region opened successfully at 1730982702723 (+1 ms)
2024-11-07T12:31:42,724 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0., pid=6, masterSystemTime=1730982702710
2024-11-07T12:31:42,726 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0.
2024-11-07T12:31:42,726 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0.
2024-11-07T12:31:42,727 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ae2d13e7df9673fc8ab06b452d0d84b0, regionState=OPEN, openSeqNum=2, regionLocation=87fb68174c52,33325,1730982701549
2024-11-07T12:31:42,729 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ae2d13e7df9673fc8ab06b452d0d84b0, server=87fb68174c52,33325,1730982701549 because future has completed
2024-11-07T12:31:42,732 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-11-07T12:31:42,732 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ae2d13e7df9673fc8ab06b452d0d84b0, server=87fb68174c52,33325,1730982701549 in 172 msec
2024-11-07T12:31:42,734 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-11-07T12:31:42,734 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ae2d13e7df9673fc8ab06b452d0d84b0, ASSIGN in 330 msec
2024-11-07T12:31:42,735 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-11-07T12:31:42,735 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730982702735"}]},"ts":"1730982702735"}
2024-11-07T12:31:42,737 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta
2024-11-07T12:31:42,738 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION
2024-11-07T12:31:42,740 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 361 msec
2024-11-07T12:31:42,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:43,059 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:43,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:43,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:44,059 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:44,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:44,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:45,060 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:45,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:45,361 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,361 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,361 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,362 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,362 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,362 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,362 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,362 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,377 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,378 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,378 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,378 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,378 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,381 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,381 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,381 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,383 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:45,888 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-07T12:31:45,889 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,889 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,908 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,908 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,908 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,913 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,913 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:45,916 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:46,060 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:46,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:46,517 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-07T12:31:46,517 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-07T12:31:46,518 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-07T12:31:46,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:47,061 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:47,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:47,791 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-07T12:31:47,791 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-07T12:31:47,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:48,061 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:48,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:48,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:49,062 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:49,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:49,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:50,062 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:50,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:50,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:51,063 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:51,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:51,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:52,020 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-07T12:31:52,021 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:52,021 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:52,021 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:52,022 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:52,022 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:52,022 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:52,022 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:52,023 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:52,041 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:52,041 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:52,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:52,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:52,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:52,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:52,046 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:52,046 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:52,046 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:52,049 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:31:52,063 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:52,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:52,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-07T12:31:52,428 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-07T12:31:52,428 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-07T12:31:52,431 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-07T12:31:52,431 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0. 2024-11-07T12:31:52,433 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0., hostname=87fb68174c52,33325,1730982701549, seqNum=2] 2024-11-07T12:31:52,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33325 {}] regionserver.HRegion(8855): Flush requested on ae2d13e7df9673fc8ab06b452d0d84b0 2024-11-07T12:31:52,444 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae2d13e7df9673fc8ab06b452d0d84b0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-07T12:31:52,462 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/a17746956ba043c6992e80c172ba2a41 is 1080, key is row0001/info:/1730982712434/Put/seqid=0 2024-11-07T12:31:52,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741837_1013 (size=12509) 2024-11-07T12:31:52,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741837_1013 (size=12509) 2024-11-07T12:31:52,468 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/a17746956ba043c6992e80c172ba2a41 2024-11-07T12:31:52,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/a17746956ba043c6992e80c172ba2a41 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/a17746956ba043c6992e80c172ba2a41 2024-11-07T12:31:52,480 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/a17746956ba043c6992e80c172ba2a41, entries=7, sequenceid=11, filesize=12.2 K 2024-11-07T12:31:52,481 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=22.07 KB/22596 for ae2d13e7df9673fc8ab06b452d0d84b0 in 36ms, sequenceid=11, compaction requested=false 2024-11-07T12:31:52,481 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae2d13e7df9673fc8ab06b452d0d84b0: 2024-11-07T12:31:52,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33325 {}] regionserver.HRegion(8855): Flush requested on ae2d13e7df9673fc8ab06b452d0d84b0 2024-11-07T12:31:52,482 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae2d13e7df9673fc8ab06b452d0d84b0 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-07T12:31:52,486 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/5be82c8b31304eb4a5131ccdd3858c88 is 1080, key is row0008/info:/1730982712445/Put/seqid=0 2024-11-07T12:31:52,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741838_1014 (size=29761) 2024-11-07T12:31:52,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741838_1014 (size=29761) 2024-11-07T12:31:52,491 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/5be82c8b31304eb4a5131ccdd3858c88 2024-11-07T12:31:52,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/5be82c8b31304eb4a5131ccdd3858c88 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/5be82c8b31304eb4a5131ccdd3858c88 2024-11-07T12:31:52,501 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/5be82c8b31304eb4a5131ccdd3858c88, entries=23, sequenceid=37, filesize=29.1 K 2024-11-07T12:31:52,502 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, 
heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for ae2d13e7df9673fc8ab06b452d0d84b0 in 20ms, sequenceid=37, compaction requested=false 2024-11-07T12:31:52,502 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae2d13e7df9673fc8ab06b452d0d84b0: 2024-11-07T12:31:52,502 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-07T12:31:52,502 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:31:52,502 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/5be82c8b31304eb4a5131ccdd3858c88 because midkey is the same as first or last row 2024-11-07T12:31:52,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:53,064 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:53,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:53,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:54,064 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:54,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:54,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33325 {}] regionserver.HRegion(8855): Flush requested on ae2d13e7df9673fc8ab06b452d0d84b0 2024-11-07T12:31:54,494 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae2d13e7df9673fc8ab06b452d0d84b0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-07T12:31:54,498 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/595ce4d8ce7a4728b156d1feee48d011 is 1080, key is row0031/info:/1730982712483/Put/seqid=0 2024-11-07T12:31:54,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741839_1015 (size=12509) 2024-11-07T12:31:54,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741839_1015 (size=12509) 2024-11-07T12:31:54,506 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/595ce4d8ce7a4728b156d1feee48d011 2024-11-07T12:31:54,513 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/595ce4d8ce7a4728b156d1feee48d011 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/595ce4d8ce7a4728b156d1feee48d011 2024-11-07T12:31:54,518 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/595ce4d8ce7a4728b156d1feee48d011, entries=7, sequenceid=47, filesize=12.2 K 2024-11-07T12:31:54,518 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 
KB/16140 for ae2d13e7df9673fc8ab06b452d0d84b0 in 24ms, sequenceid=47, compaction requested=true 2024-11-07T12:31:54,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae2d13e7df9673fc8ab06b452d0d84b0: 2024-11-07T12:31:54,519 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-07T12:31:54,519 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:31:54,519 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/5be82c8b31304eb4a5131ccdd3858c88 because midkey is the same as first or last row 2024-11-07T12:31:54,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ae2d13e7df9673fc8ab06b452d0d84b0:info, priority=-2147483648, current under compaction store size is 1 2024-11-07T12:31:54,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:31:54,519 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T12:31:54,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33325 {}] regionserver.HRegion(8855): Flush requested on ae2d13e7df9673fc8ab06b452d0d84b0 2024-11-07T12:31:54,520 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae2d13e7df9673fc8ab06b452d0d84b0 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-07T12:31:54,520 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T12:31:54,521 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1541): ae2d13e7df9673fc8ab06b452d0d84b0/info is initiating minor compaction (all files) 2024-11-07T12:31:54,521 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ae2d13e7df9673fc8ab06b452d0d84b0/info in TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0. 
2024-11-07T12:31:54,521 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/a17746956ba043c6992e80c172ba2a41, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/5be82c8b31304eb4a5131ccdd3858c88, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/595ce4d8ce7a4728b156d1feee48d011] into tmpdir=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp, totalSize=53.5 K 2024-11-07T12:31:54,521 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting a17746956ba043c6992e80c172ba2a41, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1730982712434 2024-11-07T12:31:54,522 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5be82c8b31304eb4a5131ccdd3858c88, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1730982712445 2024-11-07T12:31:54,522 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting 595ce4d8ce7a4728b156d1feee48d011, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1730982712483 2024-11-07T12:31:54,524 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/6678d7ea8b9e4342aa5e0552d61b8db0 is 1080, key is row0038/info:/1730982714495/Put/seqid=0 2024-11-07T12:31:54,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741840_1016 (size=22222) 2024-11-07T12:31:54,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741840_1016 (size=22222) 2024-11-07T12:31:54,530 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/6678d7ea8b9e4342aa5e0552d61b8db0 2024-11-07T12:31:54,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/6678d7ea8b9e4342aa5e0552d61b8db0 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/6678d7ea8b9e4342aa5e0552d61b8db0 2024-11-07T12:31:54,538 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ae2d13e7df9673fc8ab06b452d0d84b0#info#compaction#59 average throughput is 37.97 MB/second, slept 0 time(s) and total slept time is 
0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:31:54,539 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/965c01cdcc634809bce1c8709f439800 is 1080, key is row0001/info:/1730982712434/Put/seqid=0 2024-11-07T12:31:54,541 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/6678d7ea8b9e4342aa5e0552d61b8db0, entries=16, sequenceid=66, filesize=21.7 K 2024-11-07T12:31:54,542 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=11.56 KB/11836 for ae2d13e7df9673fc8ab06b452d0d84b0 in 22ms, sequenceid=66, compaction requested=false 2024-11-07T12:31:54,542 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae2d13e7df9673fc8ab06b452d0d84b0: 2024-11-07T12:31:54,542 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=75.2 K, sizeToCheck=16.0 K 2024-11-07T12:31:54,542 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:31:54,542 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/5be82c8b31304eb4a5131ccdd3858c88 because midkey is the same as first or last row 2024-11-07T12:31:54,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741841_1017 (size=44978) 2024-11-07T12:31:54,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741841_1017 (size=44978) 2024-11-07T12:31:54,553 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/965c01cdcc634809bce1c8709f439800 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/965c01cdcc634809bce1c8709f439800 2024-11-07T12:31:54,558 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ae2d13e7df9673fc8ab06b452d0d84b0/info of ae2d13e7df9673fc8ab06b452d0d84b0 into 965c01cdcc634809bce1c8709f439800(size=43.9 K), total size for store is 65.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T12:31:54,558 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ae2d13e7df9673fc8ab06b452d0d84b0: 2024-11-07T12:31:54,558 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0., storeName=ae2d13e7df9673fc8ab06b452d0d84b0/info, priority=13, startTime=1730982714519; duration=0sec 2024-11-07T12:31:54,558 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.6 K, sizeToCheck=16.0 K 2024-11-07T12:31:54,558 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:31:54,558 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/965c01cdcc634809bce1c8709f439800 because midkey is the same as first or last row 2024-11-07T12:31:54,558 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.6 K, sizeToCheck=16.0 K 2024-11-07T12:31:54,558 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:31:54,558 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/965c01cdcc634809bce1c8709f439800 because midkey is the same as first or last row 2024-11-07T12:31:54,558 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.6 K, sizeToCheck=16.0 K 2024-11-07T12:31:54,558 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:31:54,558 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/965c01cdcc634809bce1c8709f439800 because midkey is the same as first or last row 2024-11-07T12:31:54,558 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:31:54,558 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ae2d13e7df9673fc8ab06b452d0d84b0:info 2024-11-07T12:31:54,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:55,065 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:55,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:55,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:56,065 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:56,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:56,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33325 {}] regionserver.HRegion(8855): Flush requested on ae2d13e7df9673fc8ab06b452d0d84b0 2024-11-07T12:31:56,543 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae2d13e7df9673fc8ab06b452d0d84b0 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-07T12:31:56,547 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/c5cd6e87dd5241258fe0c6d4a2deb259 is 1080, key is row0054/info:/1730982714521/Put/seqid=0 2024-11-07T12:31:56,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741842_1018 (size=17894) 2024-11-07T12:31:56,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741842_1018 (size=17894) 2024-11-07T12:31:56,555 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/c5cd6e87dd5241258fe0c6d4a2deb259 2024-11-07T12:31:56,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/c5cd6e87dd5241258fe0c6d4a2deb259 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/c5cd6e87dd5241258fe0c6d4a2deb259 2024-11-07T12:31:56,565 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/c5cd6e87dd5241258fe0c6d4a2deb259, entries=12, sequenceid=82, filesize=17.5 K 2024-11-07T12:31:56,566 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=14.71 KB/15064 for ae2d13e7df9673fc8ab06b452d0d84b0 in 23ms, sequenceid=82, compaction requested=true 2024-11-07T12:31:56,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae2d13e7df9673fc8ab06b452d0d84b0: 2024-11-07T12:31:56,566 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-11-07T12:31:56,566 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:31:56,566 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/965c01cdcc634809bce1c8709f439800 because midkey is the same as first or last row 2024-11-07T12:31:56,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ae2d13e7df9673fc8ab06b452d0d84b0:info, priority=-2147483648, current under 
compaction store size is 1 2024-11-07T12:31:56,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:31:56,567 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T12:31:56,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33325 {}] regionserver.HRegion(8855): Flush requested on ae2d13e7df9673fc8ab06b452d0d84b0 2024-11-07T12:31:56,567 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae2d13e7df9673fc8ab06b452d0d84b0 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-07T12:31:56,568 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85094 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T12:31:56,568 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1541): ae2d13e7df9673fc8ab06b452d0d84b0/info is initiating minor compaction (all files) 2024-11-07T12:31:56,568 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ae2d13e7df9673fc8ab06b452d0d84b0/info in TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0. 2024-11-07T12:31:56,568 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/965c01cdcc634809bce1c8709f439800, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/6678d7ea8b9e4342aa5e0552d61b8db0, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/c5cd6e87dd5241258fe0c6d4a2deb259] into tmpdir=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp, totalSize=83.1 K 2024-11-07T12:31:56,569 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting 965c01cdcc634809bce1c8709f439800, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1730982712434 2024-11-07T12:31:56,569 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6678d7ea8b9e4342aa5e0552d61b8db0, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=66, earliestPutTs=1730982714495 2024-11-07T12:31:56,570 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting c5cd6e87dd5241258fe0c6d4a2deb259, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1730982714521 2024-11-07T12:31:56,572 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/c6a1465e426c406594d245e77fbc8e84 is 1080, key is 
row0066/info:/1730982716544/Put/seqid=0 2024-11-07T12:31:56,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741843_1019 (size=21141) 2024-11-07T12:31:56,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741843_1019 (size=21141) 2024-11-07T12:31:56,578 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=100 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/c6a1465e426c406594d245e77fbc8e84 2024-11-07T12:31:56,584 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ae2d13e7df9673fc8ab06b452d0d84b0#info#compaction#62 average throughput is 22.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:31:56,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/c6a1465e426c406594d245e77fbc8e84 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/c6a1465e426c406594d245e77fbc8e84 2024-11-07T12:31:56,584 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/3efa58aa2e4146bc8c97c59d8e485173 is 1080, key is row0001/info:/1730982712434/Put/seqid=0 2024-11-07T12:31:56,590 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/c6a1465e426c406594d245e77fbc8e84, entries=15, sequenceid=100, filesize=20.6 K 2024-11-07T12:31:56,591 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=12.61 KB/12912 for ae2d13e7df9673fc8ab06b452d0d84b0 in 24ms, sequenceid=100, compaction requested=false 2024-11-07T12:31:56,591 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae2d13e7df9673fc8ab06b452d0d84b0: 2024-11-07T12:31:56,591 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=103.7 K, sizeToCheck=16.0 K 2024-11-07T12:31:56,591 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:31:56,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33325 {}] regionserver.HRegion(8855): Flush requested on ae2d13e7df9673fc8ab06b452d0d84b0 2024-11-07T12:31:56,591 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/965c01cdcc634809bce1c8709f439800 because midkey is the same as first 
or last row 2024-11-07T12:31:56,592 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae2d13e7df9673fc8ab06b452d0d84b0 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-07T12:31:56,596 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/40c5caa86ca64f2eb26027db7fdde0d0 is 1080, key is row0081/info:/1730982716568/Put/seqid=0 2024-11-07T12:31:56,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741844_1020 (size=75378) 2024-11-07T12:31:56,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741844_1020 (size=75378) 2024-11-07T12:31:56,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741845_1021 (size=18987) 2024-11-07T12:31:56,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741845_1021 (size=18987) 2024-11-07T12:31:56,601 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/40c5caa86ca64f2eb26027db7fdde0d0 2024-11-07T12:31:56,605 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/3efa58aa2e4146bc8c97c59d8e485173 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/3efa58aa2e4146bc8c97c59d8e485173 2024-11-07T12:31:56,606 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/40c5caa86ca64f2eb26027db7fdde0d0 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/40c5caa86ca64f2eb26027db7fdde0d0 2024-11-07T12:31:56,611 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ae2d13e7df9673fc8ab06b452d0d84b0/info of ae2d13e7df9673fc8ab06b452d0d84b0 into 3efa58aa2e4146bc8c97c59d8e485173(size=73.6 K), total size for store is 94.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T12:31:56,611 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ae2d13e7df9673fc8ab06b452d0d84b0: 2024-11-07T12:31:56,611 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0., storeName=ae2d13e7df9673fc8ab06b452d0d84b0/info, priority=13, startTime=1730982716567; duration=0sec 2024-11-07T12:31:56,611 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=94.3 K, sizeToCheck=16.0 K 2024-11-07T12:31:56,611 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:31:56,611 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/40c5caa86ca64f2eb26027db7fdde0d0, entries=13, sequenceid=116, filesize=18.5 K 2024-11-07T12:31:56,611 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.8 K, sizeToCheck=16.0 K 2024-11-07T12:31:56,611 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:31:56,611 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.8 K, sizeToCheck=16.0 K 2024-11-07T12:31:56,611 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:31:56,612 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=3.15 KB/3228 for ae2d13e7df9673fc8ab06b452d0d84b0 in 21ms, sequenceid=116, compaction requested=true 2024-11-07T12:31:56,612 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae2d13e7df9673fc8ab06b452d0d84b0: 2024-11-07T12:31:56,612 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.8 K, sizeToCheck=16.0 K 2024-11-07T12:31:56,612 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:31:56,612 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:31:56,613 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:31:56,613 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ae2d13e7df9673fc8ab06b452d0d84b0:info 2024-11-07T12:31:56,613 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.8 K, sizeToCheck=16.0 K 
2024-11-07T12:31:56,613 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:31:56,613 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.8 K, sizeToCheck=16.0 K 2024-11-07T12:31:56,613 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:31:56,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=1 2024-11-07T12:31:56,614 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37673 {}] assignment.AssignmentManager(1355): Split request from 87fb68174c52,33325,1730982701549, parent={ENCODED => ae2d13e7df9673fc8ab06b452d0d84b0, NAME => 'TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-07T12:31:56,618 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37673 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=87fb68174c52,33325,1730982701549 2024-11-07T12:31:56,622 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37673 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ae2d13e7df9673fc8ab06b452d0d84b0, daughterA=106fd831981b255f7952714d962d8fb9, daughterB=dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:31:56,623 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ae2d13e7df9673fc8ab06b452d0d84b0, daughterA=106fd831981b255f7952714d962d8fb9, daughterB=dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:31:56,623 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ae2d13e7df9673fc8ab06b452d0d84b0, daughterA=106fd831981b255f7952714d962d8fb9, daughterB=dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:31:56,623 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ae2d13e7df9673fc8ab06b452d0d84b0, daughterA=106fd831981b255f7952714d962d8fb9, daughterB=dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:31:56,624 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37673 {}] assignment.AssignmentManager(1355): Split request from 87fb68174c52,33325,1730982701549, parent={ENCODED => ae2d13e7df9673fc8ab06b452d0d84b0, NAME => 'TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-07T12:31:56,625 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37673 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=SPLITTING, location=87fb68174c52,33325,1730982701549 2024-11-07T12:31:56,626 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37673 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, 
hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ae2d13e7df9673fc8ab06b452d0d84b0, daughterA=1e3d57b452248e6c91d1ee036def4aa3, daughterB=5ed3dd24026e3aa397e68ef0b467b769 2024-11-07T12:31:56,626 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(842): Waiting on xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ae2d13e7df9673fc8ab06b452d0d84b0, daughterA=1e3d57b452248e6c91d1ee036def4aa3, daughterB=5ed3dd24026e3aa397e68ef0b467b769 held by pid=7 2024-11-07T12:31:56,631 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ae2d13e7df9673fc8ab06b452d0d84b0, UNASSIGN}] 2024-11-07T12:31:56,633 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(162): LOCK_EVENT_WAIT SchemaLocking[serverLocks={},namespaceLocks={hbase=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},tableLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},regionLocks={},peerLocks={},metaLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},globalLocks={}] 2024-11-07T12:31:56,633 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ae2d13e7df9673fc8ab06b452d0d84b0, UNASSIGN 2024-11-07T12:31:56,633 DEBUG [PEWorker-3 {}] procedure2.ProcedureExecutor(1511): LOCK_EVENT_WAIT pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ae2d13e7df9673fc8ab06b452d0d84b0, daughterA=1e3d57b452248e6c91d1ee036def4aa3, daughterB=5ed3dd24026e3aa397e68ef0b467b769 2024-11-07T12:31:56,635 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=ae2d13e7df9673fc8ab06b452d0d84b0, regionState=CLOSING, regionLocation=87fb68174c52,33325,1730982701549 2024-11-07T12:31:56,636 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ae2d13e7df9673fc8ab06b452d0d84b0, UNASSIGN because future has completed 2024-11-07T12:31:56,637 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-07T12:31:56,637 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure ae2d13e7df9673fc8ab06b452d0d84b0, server=87fb68174c52,33325,1730982701549}] 2024-11-07T12:31:56,794 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(122): Close ae2d13e7df9673fc8ab06b452d0d84b0 2024-11-07T12:31:56,794 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-07T12:31:56,795 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1722): Closing 
ae2d13e7df9673fc8ab06b452d0d84b0, disabling compactions & flushes 2024-11-07T12:31:56,795 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0. 2024-11-07T12:31:56,795 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0. 2024-11-07T12:31:56,795 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0. after waiting 0 ms 2024-11-07T12:31:56,795 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0. 2024-11-07T12:31:56,795 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(2902): Flushing ae2d13e7df9673fc8ab06b452d0d84b0 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-07T12:31:56,799 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/c30e7b4010f3414890cba0daff6950b7 is 1080, key is row0094/info:/1730982716593/Put/seqid=0 2024-11-07T12:31:56,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741846_1022 (size=8193) 2024-11-07T12:31:56,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741846_1022 (size=8193) 2024-11-07T12:31:56,804 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/c30e7b4010f3414890cba0daff6950b7 2024-11-07T12:31:56,809 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/.tmp/info/c30e7b4010f3414890cba0daff6950b7 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/c30e7b4010f3414890cba0daff6950b7 2024-11-07T12:31:56,814 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/c30e7b4010f3414890cba0daff6950b7, entries=3, sequenceid=123, filesize=8.0 K 2024-11-07T12:31:56,815 INFO 
[RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for ae2d13e7df9673fc8ab06b452d0d84b0 in 19ms, sequenceid=123, compaction requested=true 2024-11-07T12:31:56,815 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/a17746956ba043c6992e80c172ba2a41, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/5be82c8b31304eb4a5131ccdd3858c88, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/965c01cdcc634809bce1c8709f439800, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/595ce4d8ce7a4728b156d1feee48d011, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/6678d7ea8b9e4342aa5e0552d61b8db0, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/c5cd6e87dd5241258fe0c6d4a2deb259] to archive 2024-11-07T12:31:56,816 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-07T12:31:56,818 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/a17746956ba043c6992e80c172ba2a41 to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/a17746956ba043c6992e80c172ba2a41 2024-11-07T12:31:56,819 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/5be82c8b31304eb4a5131ccdd3858c88 to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/5be82c8b31304eb4a5131ccdd3858c88 2024-11-07T12:31:56,820 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/965c01cdcc634809bce1c8709f439800 to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/965c01cdcc634809bce1c8709f439800 2024-11-07T12:31:56,821 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/595ce4d8ce7a4728b156d1feee48d011 to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/595ce4d8ce7a4728b156d1feee48d011 2024-11-07T12:31:56,822 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/6678d7ea8b9e4342aa5e0552d61b8db0 to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/6678d7ea8b9e4342aa5e0552d61b8db0 2024-11-07T12:31:56,823 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/c5cd6e87dd5241258fe0c6d4a2deb259 to 
hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/c5cd6e87dd5241258fe0c6d4a2deb259 2024-11-07T12:31:56,829 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-11-07T12:31:56,830 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0. 2024-11-07T12:31:56,830 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1676): Region close journal for ae2d13e7df9673fc8ab06b452d0d84b0: Waiting for close lock at 1730982716795Running coprocessor pre-close hooks at 1730982716795Disabling compacts and flushes for region at 1730982716795Disabling writes for close at 1730982716795Obtaining lock to block concurrent updates at 1730982716795Preparing flush snapshotting stores in ae2d13e7df9673fc8ab06b452d0d84b0 at 1730982716795Finished memstore snapshotting TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1730982716795Flushing stores of TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0. at 1730982716796 (+1 ms)Flushing ae2d13e7df9673fc8ab06b452d0d84b0/info: creating writer at 1730982716796Flushing ae2d13e7df9673fc8ab06b452d0d84b0/info: appending metadata at 1730982716799 (+3 ms)Flushing ae2d13e7df9673fc8ab06b452d0d84b0/info: closing flushed file at 1730982716799Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@701873d7: reopening flushed file at 1730982716809 (+10 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for ae2d13e7df9673fc8ab06b452d0d84b0 in 19ms, sequenceid=123, compaction requested=true at 1730982716815 (+6 ms)Writing region close event to WAL at 1730982716826 (+11 ms)Running coprocessor post-close hooks at 1730982716830 (+4 ms)Closed at 1730982716830 2024-11-07T12:31:56,832 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(157): Closed ae2d13e7df9673fc8ab06b452d0d84b0 2024-11-07T12:31:56,833 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=ae2d13e7df9673fc8ab06b452d0d84b0, regionState=CLOSED 2024-11-07T12:31:56,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure ae2d13e7df9673fc8ab06b452d0d84b0, server=87fb68174c52,33325,1730982701549 because future has completed 2024-11-07T12:31:56,838 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-07T12:31:56,838 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; CloseRegionProcedure ae2d13e7df9673fc8ab06b452d0d84b0, server=87fb68174c52,33325,1730982701549 in 199 msec 2024-11-07T12:31:56,840 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-11-07T12:31:56,840 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ae2d13e7df9673fc8ab06b452d0d84b0, UNASSIGN in 207 msec 2024-11-07T12:31:56,848 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:31:56,851 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=ae2d13e7df9673fc8ab06b452d0d84b0, threads=4 2024-11-07T12:31:56,853 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/c6a1465e426c406594d245e77fbc8e84 for region: ae2d13e7df9673fc8ab06b452d0d84b0 2024-11-07T12:31:56,853 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/40c5caa86ca64f2eb26027db7fdde0d0 for region: ae2d13e7df9673fc8ab06b452d0d84b0 2024-11-07T12:31:56,853 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/c30e7b4010f3414890cba0daff6950b7 for region: ae2d13e7df9673fc8ab06b452d0d84b0 2024-11-07T12:31:56,853 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/3efa58aa2e4146bc8c97c59d8e485173 for region: ae2d13e7df9673fc8ab06b452d0d84b0 2024-11-07T12:31:56,864 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/c30e7b4010f3414890cba0daff6950b7, top=true 2024-11-07T12:31:56,864 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/40c5caa86ca64f2eb26027db7fdde0d0, top=true 2024-11-07T12:31:56,864 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/c6a1465e426c406594d245e77fbc8e84, top=true 2024-11-07T12:31:56,873 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created 
linkFile:hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/TestLogRolling-testLogRolling=ae2d13e7df9673fc8ab06b452d0d84b0-c6a1465e426c406594d245e77fbc8e84 for child: dc6971b5fdb896c599e17dcb1681742a, parent: ae2d13e7df9673fc8ab06b452d0d84b0 2024-11-07T12:31:56,873 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/TestLogRolling-testLogRolling=ae2d13e7df9673fc8ab06b452d0d84b0-40c5caa86ca64f2eb26027db7fdde0d0 for child: dc6971b5fdb896c599e17dcb1681742a, parent: ae2d13e7df9673fc8ab06b452d0d84b0 2024-11-07T12:31:56,873 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/40c5caa86ca64f2eb26027db7fdde0d0 for region: ae2d13e7df9673fc8ab06b452d0d84b0 2024-11-07T12:31:56,873 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/c6a1465e426c406594d245e77fbc8e84 for region: ae2d13e7df9673fc8ab06b452d0d84b0 2024-11-07T12:31:56,875 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/TestLogRolling-testLogRolling=ae2d13e7df9673fc8ab06b452d0d84b0-c30e7b4010f3414890cba0daff6950b7 for child: dc6971b5fdb896c599e17dcb1681742a, parent: ae2d13e7df9673fc8ab06b452d0d84b0 2024-11-07T12:31:56,875 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/c30e7b4010f3414890cba0daff6950b7 for region: ae2d13e7df9673fc8ab06b452d0d84b0 2024-11-07T12:31:56,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741847_1023 (size=27) 2024-11-07T12:31:56,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741847_1023 (size=27) 2024-11-07T12:31:56,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:56,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741848_1024 (size=27) 2024-11-07T12:31:56,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741848_1024 (size=27) 2024-11-07T12:31:56,887 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/3efa58aa2e4146bc8c97c59d8e485173 for region: ae2d13e7df9673fc8ab06b452d0d84b0 2024-11-07T12:31:56,889 DEBUG [PEWorker-5 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region ae2d13e7df9673fc8ab06b452d0d84b0 Daughter A: [hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/106fd831981b255f7952714d962d8fb9/info/3efa58aa2e4146bc8c97c59d8e485173.ae2d13e7df9673fc8ab06b452d0d84b0] storefiles, Daughter B: [hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/3efa58aa2e4146bc8c97c59d8e485173.ae2d13e7df9673fc8ab06b452d0d84b0, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/TestLogRolling-testLogRolling=ae2d13e7df9673fc8ab06b452d0d84b0-40c5caa86ca64f2eb26027db7fdde0d0, 
hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/TestLogRolling-testLogRolling=ae2d13e7df9673fc8ab06b452d0d84b0-c30e7b4010f3414890cba0daff6950b7, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/TestLogRolling-testLogRolling=ae2d13e7df9673fc8ab06b452d0d84b0-c6a1465e426c406594d245e77fbc8e84] storefiles. 2024-11-07T12:31:56,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741849_1025 (size=71) 2024-11-07T12:31:56,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741849_1025 (size=71) 2024-11-07T12:31:56,898 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:31:56,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741850_1026 (size=71) 2024-11-07T12:31:56,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741850_1026 (size=71) 2024-11-07T12:31:56,911 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:31:56,920 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/106fd831981b255f7952714d962d8fb9/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-07T12:31:56,922 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-07T12:31:56,924 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1730982716924"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1730982716924"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1730982716924"}]},"ts":"1730982716924"} 2024-11-07T12:31:56,924 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1730982716618.106fd831981b255f7952714d962d8fb9.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1730982716924"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730982716924"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1730982716924"}]},"ts":"1730982716924"} 2024-11-07T12:31:56,924 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1730982716924"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730982716924"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1730982716924"}]},"ts":"1730982716924"} 2024-11-07T12:31:56,941 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=106fd831981b255f7952714d962d8fb9, ASSIGN}, {pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=dc6971b5fdb896c599e17dcb1681742a, ASSIGN}] 2024-11-07T12:31:56,942 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=106fd831981b255f7952714d962d8fb9, ASSIGN 2024-11-07T12:31:56,942 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=dc6971b5fdb896c599e17dcb1681742a, ASSIGN 2024-11-07T12:31:56,943 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=106fd831981b255f7952714d962d8fb9, ASSIGN; state=SPLITTING_NEW, location=87fb68174c52,33325,1730982701549; forceNewPlan=false, retain=false 2024-11-07T12:31:56,943 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=dc6971b5fdb896c599e17dcb1681742a, ASSIGN; state=SPLITTING_NEW, location=87fb68174c52,33325,1730982701549; forceNewPlan=false, retain=false 2024-11-07T12:31:57,066 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:57,093 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=dc6971b5fdb896c599e17dcb1681742a, regionState=OPENING, regionLocation=87fb68174c52,33325,1730982701549 2024-11-07T12:31:57,093 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=106fd831981b255f7952714d962d8fb9, regionState=OPENING, regionLocation=87fb68174c52,33325,1730982701549 2024-11-07T12:31:57,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=106fd831981b255f7952714d962d8fb9, ASSIGN because future has completed 2024-11-07T12:31:57,096 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 106fd831981b255f7952714d962d8fb9, server=87fb68174c52,33325,1730982701549}] 2024-11-07T12:31:57,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=dc6971b5fdb896c599e17dcb1681742a, ASSIGN because future has completed 2024-11-07T12:31:57,097 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure dc6971b5fdb896c599e17dcb1681742a, server=87fb68174c52,33325,1730982701549}] 2024-11-07T12:31:57,252 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1730982716618.106fd831981b255f7952714d962d8fb9. 
2024-11-07T12:31:57,252 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 106fd831981b255f7952714d962d8fb9, NAME => 'TestLogRolling-testLogRolling,,1730982716618.106fd831981b255f7952714d962d8fb9.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-07T12:31:57,252 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 106fd831981b255f7952714d962d8fb9 2024-11-07T12:31:57,252 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1730982716618.106fd831981b255f7952714d962d8fb9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:31:57,252 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 106fd831981b255f7952714d962d8fb9 2024-11-07T12:31:57,252 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 106fd831981b255f7952714d962d8fb9 2024-11-07T12:31:57,254 INFO [StoreOpener-106fd831981b255f7952714d962d8fb9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 106fd831981b255f7952714d962d8fb9 2024-11-07T12:31:57,254 INFO [StoreOpener-106fd831981b255f7952714d962d8fb9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 106fd831981b255f7952714d962d8fb9 columnFamilyName info 2024-11-07T12:31:57,254 DEBUG [StoreOpener-106fd831981b255f7952714d962d8fb9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:31:57,265 DEBUG [StoreOpener-106fd831981b255f7952714d962d8fb9-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/106fd831981b255f7952714d962d8fb9/info/3efa58aa2e4146bc8c97c59d8e485173.ae2d13e7df9673fc8ab06b452d0d84b0->hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/3efa58aa2e4146bc8c97c59d8e485173-bottom 2024-11-07T12:31:57,265 INFO [StoreOpener-106fd831981b255f7952714d962d8fb9-1 {}] regionserver.HStore(327): Store=106fd831981b255f7952714d962d8fb9/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:31:57,265 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 106fd831981b255f7952714d962d8fb9 2024-11-07T12:31:57,266 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/106fd831981b255f7952714d962d8fb9 2024-11-07T12:31:57,267 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/106fd831981b255f7952714d962d8fb9 2024-11-07T12:31:57,267 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 106fd831981b255f7952714d962d8fb9 2024-11-07T12:31:57,267 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 106fd831981b255f7952714d962d8fb9 2024-11-07T12:31:57,269 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 106fd831981b255f7952714d962d8fb9 2024-11-07T12:31:57,270 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 106fd831981b255f7952714d962d8fb9; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=849969, jitterRate=0.08079177141189575}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-07T12:31:57,270 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 106fd831981b255f7952714d962d8fb9 2024-11-07T12:31:57,270 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 106fd831981b255f7952714d962d8fb9: Running coprocessor pre-open hook at 1730982717253Writing region info on filesystem at 1730982717253Initializing all the Stores at 1730982717253Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982717253Cleaning up temporary data from old regions at 1730982717267 (+14 ms)Running coprocessor post-open hooks at 1730982717270 (+3 ms)Region opened successfully at 1730982717270 2024-11-07T12:31:57,271 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1730982716618.106fd831981b255f7952714d962d8fb9., pid=13, masterSystemTime=1730982717248 2024-11-07T12:31:57,271 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 
106fd831981b255f7952714d962d8fb9:info, priority=-2147483648, current under compaction store size is 1 2024-11-07T12:31:57,271 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:31:57,271 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-07T12:31:57,272 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1730982716618.106fd831981b255f7952714d962d8fb9. 2024-11-07T12:31:57,272 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1541): 106fd831981b255f7952714d962d8fb9/info is initiating minor compaction (all files) 2024-11-07T12:31:57,272 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 106fd831981b255f7952714d962d8fb9/info in TestLogRolling-testLogRolling,,1730982716618.106fd831981b255f7952714d962d8fb9. 2024-11-07T12:31:57,272 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/106fd831981b255f7952714d962d8fb9/info/3efa58aa2e4146bc8c97c59d8e485173.ae2d13e7df9673fc8ab06b452d0d84b0->hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/3efa58aa2e4146bc8c97c59d8e485173-bottom] into tmpdir=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/106fd831981b255f7952714d962d8fb9/.tmp, totalSize=73.6 K 2024-11-07T12:31:57,273 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3efa58aa2e4146bc8c97c59d8e485173.ae2d13e7df9673fc8ab06b452d0d84b0, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1730982712434 2024-11-07T12:31:57,273 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1730982716618.106fd831981b255f7952714d962d8fb9. 2024-11-07T12:31:57,273 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1730982716618.106fd831981b255f7952714d962d8fb9. 2024-11-07T12:31:57,273 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a. 
2024-11-07T12:31:57,274 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7752): Opening region: {ENCODED => dc6971b5fdb896c599e17dcb1681742a, NAME => 'TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-07T12:31:57,274 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:31:57,274 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:31:57,274 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7794): checking encryption for dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:31:57,274 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7797): checking classloading for dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:31:57,274 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=106fd831981b255f7952714d962d8fb9, regionState=OPEN, openSeqNum=127, regionLocation=87fb68174c52,33325,1730982701549 2024-11-07T12:31:57,275 INFO [StoreOpener-dc6971b5fdb896c599e17dcb1681742a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:31:57,276 INFO [StoreOpener-dc6971b5fdb896c599e17dcb1681742a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dc6971b5fdb896c599e17dcb1681742a columnFamilyName info 2024-11-07T12:31:57,276 DEBUG [StoreOpener-dc6971b5fdb896c599e17dcb1681742a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:31:57,276 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33325 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-07T12:31:57,277 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-07T12:31:57,277 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-07T12:31:57,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 106fd831981b255f7952714d962d8fb9, server=87fb68174c52,33325,1730982701549 because future has completed 2024-11-07T12:31:57,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:57,285 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-07T12:31:57,285 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 106fd831981b255f7952714d962d8fb9, server=87fb68174c52,33325,1730982701549 in 186 msec 2024-11-07T12:31:57,286 DEBUG [StoreOpener-dc6971b5fdb896c599e17dcb1681742a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/3efa58aa2e4146bc8c97c59d8e485173.ae2d13e7df9673fc8ab06b452d0d84b0->hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/3efa58aa2e4146bc8c97c59d8e485173-top 2024-11-07T12:31:57,287 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=106fd831981b255f7952714d962d8fb9, ASSIGN in 345 msec 2024-11-07T12:31:57,291 DEBUG [StoreOpener-dc6971b5fdb896c599e17dcb1681742a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/TestLogRolling-testLogRolling=ae2d13e7df9673fc8ab06b452d0d84b0-40c5caa86ca64f2eb26027db7fdde0d0 2024-11-07T12:31:57,295 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740/.tmp/info/55ea5afab1a04dc5884a40179ae76642 is 193, key is TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a./info:regioninfo/1730982717093/Put/seqid=0 2024-11-07T12:31:57,295 DEBUG [StoreOpener-dc6971b5fdb896c599e17dcb1681742a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/TestLogRolling-testLogRolling=ae2d13e7df9673fc8ab06b452d0d84b0-c30e7b4010f3414890cba0daff6950b7 2024-11-07T12:31:57,295 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 106fd831981b255f7952714d962d8fb9#info#compaction#65 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:31:57,296 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/106fd831981b255f7952714d962d8fb9/.tmp/info/74e7006d79884231b5f87026e4161fef is 1080, key is row0001/info:/1730982712434/Put/seqid=0 2024-11-07T12:31:57,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741851_1027 (size=9847) 2024-11-07T12:31:57,300 DEBUG [StoreOpener-dc6971b5fdb896c599e17dcb1681742a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/TestLogRolling-testLogRolling=ae2d13e7df9673fc8ab06b452d0d84b0-c6a1465e426c406594d245e77fbc8e84 2024-11-07T12:31:57,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741851_1027 (size=9847) 2024-11-07T12:31:57,300 INFO [StoreOpener-dc6971b5fdb896c599e17dcb1681742a-1 {}] regionserver.HStore(327): Store=dc6971b5fdb896c599e17dcb1681742a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:31:57,300 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1038): replaying wal for dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:31:57,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741852_1028 (size=70862) 2024-11-07T12:31:57,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741852_1028 (size=70862) 2024-11-07T12:31:57,301 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:31:57,303 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:31:57,303 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740/.tmp/info/55ea5afab1a04dc5884a40179ae76642 2024-11-07T12:31:57,303 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1048): stopping wal replay for dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:31:57,303 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1060): Cleaning up temporary data for dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:31:57,305 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] 
regionserver.HRegion(1093): writing seq id for dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:31:57,306 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1114): Opened dc6971b5fdb896c599e17dcb1681742a; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=805586, jitterRate=0.024355903267860413}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-07T12:31:57,307 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1122): Running coprocessor post-open hooks for dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:31:57,307 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1006): Region open journal for dc6971b5fdb896c599e17dcb1681742a: Running coprocessor pre-open hook at 1730982717274Writing region info on filesystem at 1730982717274Initializing all the Stores at 1730982717275 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982717275Cleaning up temporary data from old regions at 1730982717303 (+28 ms)Running coprocessor post-open hooks at 1730982717307 (+4 ms)Region opened successfully at 1730982717307 2024-11-07T12:31:57,307 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/106fd831981b255f7952714d962d8fb9/.tmp/info/74e7006d79884231b5f87026e4161fef as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/106fd831981b255f7952714d962d8fb9/info/74e7006d79884231b5f87026e4161fef 2024-11-07T12:31:57,308 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a., pid=14, masterSystemTime=1730982717248 2024-11-07T12:31:57,308 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(403): Add compact mark for store dc6971b5fdb896c599e17dcb1681742a:info, priority=-2147483648, current under compaction store size is 2 2024-11-07T12:31:57,308 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:31:57,308 DEBUG [RS:0;87fb68174c52:33325-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T12:31:57,311 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=dc6971b5fdb896c599e17dcb1681742a, regionState=OPEN, openSeqNum=127, regionLocation=87fb68174c52,33325,1730982701549 2024-11-07T12:31:57,311 DEBUG [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] 
regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a. 2024-11-07T12:31:57,311 INFO [RS_OPEN_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a. 2024-11-07T12:31:57,312 INFO [RS:0;87fb68174c52:33325-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a. 2024-11-07T12:31:57,312 DEBUG [RS:0;87fb68174c52:33325-longCompactions-0 {}] regionserver.HStore(1541): dc6971b5fdb896c599e17dcb1681742a/info is initiating minor compaction (all files) 2024-11-07T12:31:57,312 INFO [RS:0;87fb68174c52:33325-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of dc6971b5fdb896c599e17dcb1681742a/info in TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a. 2024-11-07T12:31:57,312 INFO [RS:0;87fb68174c52:33325-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/3efa58aa2e4146bc8c97c59d8e485173.ae2d13e7df9673fc8ab06b452d0d84b0->hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/3efa58aa2e4146bc8c97c59d8e485173-top, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/TestLogRolling-testLogRolling=ae2d13e7df9673fc8ab06b452d0d84b0-c6a1465e426c406594d245e77fbc8e84, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/TestLogRolling-testLogRolling=ae2d13e7df9673fc8ab06b452d0d84b0-40c5caa86ca64f2eb26027db7fdde0d0, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/TestLogRolling-testLogRolling=ae2d13e7df9673fc8ab06b452d0d84b0-c30e7b4010f3414890cba0daff6950b7] into tmpdir=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp, totalSize=120.8 K 2024-11-07T12:31:57,313 DEBUG [RS:0;87fb68174c52:33325-longCompactions-0 {}] compactions.Compactor(225): Compacting 3efa58aa2e4146bc8c97c59d8e485173.ae2d13e7df9673fc8ab06b452d0d84b0, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1730982712434 2024-11-07T12:31:57,313 DEBUG [RS:0;87fb68174c52:33325-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=ae2d13e7df9673fc8ab06b452d0d84b0-c6a1465e426c406594d245e77fbc8e84, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1730982716544 2024-11-07T12:31:57,314 DEBUG [RS:0;87fb68174c52:33325-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=ae2d13e7df9673fc8ab06b452d0d84b0-40c5caa86ca64f2eb26027db7fdde0d0, keycount=13, bloomtype=ROW, 
size=18.5 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1730982716568 2024-11-07T12:31:57,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=14, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure dc6971b5fdb896c599e17dcb1681742a, server=87fb68174c52,33325,1730982701549 because future has completed 2024-11-07T12:31:57,314 DEBUG [RS:0;87fb68174c52:33325-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=ae2d13e7df9673fc8ab06b452d0d84b0-c30e7b4010f3414890cba0daff6950b7, keycount=3, bloomtype=ROW, size=8.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1730982716593 2024-11-07T12:31:57,316 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 106fd831981b255f7952714d962d8fb9/info of 106fd831981b255f7952714d962d8fb9 into 74e7006d79884231b5f87026e4161fef(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T12:31:57,316 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 106fd831981b255f7952714d962d8fb9: 2024-11-07T12:31:57,316 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1730982716618.106fd831981b255f7952714d962d8fb9., storeName=106fd831981b255f7952714d962d8fb9/info, priority=15, startTime=1730982717271; duration=0sec 2024-11-07T12:31:57,316 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:31:57,316 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 106fd831981b255f7952714d962d8fb9:info 2024-11-07T12:31:57,318 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=12 2024-11-07T12:31:57,318 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; OpenRegionProcedure dc6971b5fdb896c599e17dcb1681742a, server=87fb68174c52,33325,1730982701549 in 218 msec 2024-11-07T12:31:57,320 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=7 2024-11-07T12:31:57,320 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=dc6971b5fdb896c599e17dcb1681742a, ASSIGN in 378 msec 2024-11-07T12:31:57,322 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ae2d13e7df9673fc8ab06b452d0d84b0, daughterA=106fd831981b255f7952714d962d8fb9, daughterB=dc6971b5fdb896c599e17dcb1681742a in 702 msec 2024-11-07T12:31:57,322 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ae2d13e7df9673fc8ab06b452d0d84b0, daughterA=1e3d57b452248e6c91d1ee036def4aa3, daughterB=5ed3dd24026e3aa397e68ef0b467b769 2024-11-07T12:31:57,322 INFO [PEWorker-2 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ae2d13e7df9673fc8ab06b452d0d84b0, daughterA=1e3d57b452248e6c91d1ee036def4aa3, daughterB=5ed3dd24026e3aa397e68ef0b467b769 2024-11-07T12:31:57,322 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ae2d13e7df9673fc8ab06b452d0d84b0, daughterA=1e3d57b452248e6c91d1ee036def4aa3, daughterB=5ed3dd24026e3aa397e68ef0b467b769 2024-11-07T12:31:57,323 INFO [PEWorker-2 {}] assignment.SplitTableRegionProcedure(534): Split of {ENCODED => ae2d13e7df9673fc8ab06b452d0d84b0, NAME => 'TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0.', STARTKEY => '', ENDKEY => ''} skipped; state is already SPLIT 2024-11-07T12:31:57,325 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ae2d13e7df9673fc8ab06b452d0d84b0, daughterA=1e3d57b452248e6c91d1ee036def4aa3, daughterB=5ed3dd24026e3aa397e68ef0b467b769 in 698 msec 2024-11-07T12:31:57,326 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740/.tmp/ns/d1c4ff7a712143d284c5abcd9be55586 is 43, key is default/ns:d/1730982702333/Put/seqid=0 2024-11-07T12:31:57,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741853_1029 (size=5153) 2024-11-07T12:31:57,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741853_1029 (size=5153) 2024-11-07T12:31:57,331 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740/.tmp/ns/d1c4ff7a712143d284c5abcd9be55586 2024-11-07T12:31:57,343 INFO [RS:0;87fb68174c52:33325-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dc6971b5fdb896c599e17dcb1681742a#info#compaction#68 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:31:57,343 DEBUG [RS:0;87fb68174c52:33325-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/0de92193a88340359e5bf3a2ad3929cc is 1080, key is row0062/info:/1730982714536/Put/seqid=0 2024-11-07T12:31:57,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741854_1030 (size=43081) 2024-11-07T12:31:57,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741854_1030 (size=43081) 2024-11-07T12:31:57,353 DEBUG [RS:0;87fb68174c52:33325-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/0de92193a88340359e5bf3a2ad3929cc as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/0de92193a88340359e5bf3a2ad3929cc 2024-11-07T12:31:57,357 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740/.tmp/table/5b76e15a6e2748c9ac4f23480073d182 is 65, key is TestLogRolling-testLogRolling/table:state/1730982702735/Put/seqid=0 2024-11-07T12:31:57,359 INFO [RS:0;87fb68174c52:33325-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in dc6971b5fdb896c599e17dcb1681742a/info of dc6971b5fdb896c599e17dcb1681742a into 0de92193a88340359e5bf3a2ad3929cc(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T12:31:57,359 DEBUG [RS:0;87fb68174c52:33325-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for dc6971b5fdb896c599e17dcb1681742a: 2024-11-07T12:31:57,359 INFO [RS:0;87fb68174c52:33325-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a., storeName=dc6971b5fdb896c599e17dcb1681742a/info, priority=12, startTime=1730982717308; duration=0sec 2024-11-07T12:31:57,359 DEBUG [RS:0;87fb68174c52:33325-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:31:57,359 DEBUG [RS:0;87fb68174c52:33325-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dc6971b5fdb896c599e17dcb1681742a:info 2024-11-07T12:31:57,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741855_1031 (size=5340) 2024-11-07T12:31:57,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741855_1031 (size=5340) 2024-11-07T12:31:57,363 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740/.tmp/table/5b76e15a6e2748c9ac4f23480073d182 2024-11-07T12:31:57,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740/.tmp/info/55ea5afab1a04dc5884a40179ae76642 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740/info/55ea5afab1a04dc5884a40179ae76642 2024-11-07T12:31:57,372 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740/info/55ea5afab1a04dc5884a40179ae76642, entries=30, sequenceid=17, filesize=9.6 K 2024-11-07T12:31:57,373 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740/.tmp/ns/d1c4ff7a712143d284c5abcd9be55586 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740/ns/d1c4ff7a712143d284c5abcd9be55586 2024-11-07T12:31:57,377 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740/ns/d1c4ff7a712143d284c5abcd9be55586, entries=2, sequenceid=17, filesize=5.0 K 2024-11-07T12:31:57,378 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740/.tmp/table/5b76e15a6e2748c9ac4f23480073d182 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740/table/5b76e15a6e2748c9ac4f23480073d182 2024-11-07T12:31:57,383 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740/table/5b76e15a6e2748c9ac4f23480073d182, entries=2, sequenceid=17, filesize=5.2 K
2024-11-07T12:31:57,384 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 106ms, sequenceid=17, compaction requested=false
2024-11-07T12:31:57,384 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740:
2024-11-07T12:31:57,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-07T12:31:58,066 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:58,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:58,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33325 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:39846 deadline: 1730982728598, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0. is not online on 87fb68174c52,33325,1730982701549 2024-11-07T12:31:58,623 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0., hostname=87fb68174c52,33325,1730982701549, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0., hostname=87fb68174c52,33325,1730982701549, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0. is not online on 87fb68174c52,33325,1730982701549 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T12:31:58,623 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0., hostname=87fb68174c52,33325,1730982701549, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0. 
is not online on 87fb68174c52,33325,1730982701549 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T12:31:58,624 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1730982702376.ae2d13e7df9673fc8ab06b452d0d84b0., hostname=87fb68174c52,33325,1730982701549, seqNum=2 from cache 2024-11-07T12:31:58,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:31:59,067 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:59,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:31:59,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:32:00,068 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:00,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:00,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:32:01,068 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:01,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:01,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:32:02,069 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:02,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:02,332 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-07T12:32:02,333 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:32:02,333 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:32:02,333 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:32:02,333 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:32:02,333 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:32:02,334 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:32:02,334 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:32:02,334 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:32:02,355 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:32:02,355 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:32:02,356 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:32:02,356 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:32:02,356 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:32:02,357 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:32:02,362 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:32:02,363 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:32:02,363 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:32:02,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:32:02,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:03,069 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:03,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:03,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-07T12:32:04,070 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-07T12:32:04,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-07T12:32:04,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-07T12:32:05,070 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-07T12:32:05,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-07T12:32:05,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-07T12:32:06,071 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-07T12:32:06,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-07T12:32:06,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-07T12:32:07,071 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-07T12:32:07,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-07T12:32:07,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-07T12:32:08,072 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-07T12:32:08,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
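The repeated WARN entries above come from the WAL close path polling the NameNode while lease recovery is in progress: the HBase utility looks up DistributedFileSystem.isFileClosed reflectively (see the RecoverLeaseFSUtils frames in the trace), and every probe fails here because the test has already shut down its DFSClient, hence the "Filesystem closed" cause. The sketch below is a minimal, illustrative version of such a probe loop, not the actual RecoverLeaseFSUtils code; the class name, timeout handling, and retry interval are assumptions.

import java.io.IOException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static boolean waitForWalClose(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws IOException, InterruptedException {
    dfs.recoverLease(wal);                 // ask the NameNode to start lease recovery
    Method isFileClosed;
    try {
      // Looked up reflectively, matching the Method.invoke frames in the traces above.
      isFileClosed = dfs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false;                        // client without isFileClosed: caller falls back to plain retries
    }
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if ((Boolean) isFileClosed.invoke(dfs, wal)) {
          return true;                     // lease recovered and the WAL file is closed
        }
      } catch (ReflectiveOperationException e) {
        // getCause() carries the real failure, e.g. "java.io.IOException: Filesystem closed",
        // which is what the "Failed invocation for <wal>" warnings above report.
        System.err.println("Failed invocation for " + wal + ": " + e.getCause());
      }
      Thread.sleep(1000);                  // the retries above run roughly once per second per WAL
    }
    return false;
  }
}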
2024-11-07T12:32:08,682 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a., hostname=87fb68174c52,33325,1730982701549, seqNum=127]
2024-11-07T12:32:08,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33325 {}] regionserver.HRegion(8855): Flush requested on dc6971b5fdb896c599e17dcb1681742a
2024-11-07T12:32:08,693 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dc6971b5fdb896c599e17dcb1681742a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-07T12:32:08,698 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/5c935b76eb404dcc8bac30f9d1c8170f is 1080, key is row0097/info:/1730982728683/Put/seqid=0
2024-11-07T12:32:08,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741856_1032 (size=12516)
2024-11-07T12:32:08,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741856_1032 (size=12516)
2024-11-07T12:32:08,705 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/5c935b76eb404dcc8bac30f9d1c8170f
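The AsyncNonMetaRegionLocator entry a few records above is a cache hit: row0097 is routed to the region whose start key, row0062, is the greatest start key less than or equal to the requested row. Below is a toy sketch of that floor-lookup; the types and the cache itself are illustrative only, not the HBase client's real structures (which cache full HRegionLocation objects keyed by byte[] start rows).

import java.util.Map;
import java.util.TreeMap;

public class RegionLocationCacheSketch {
  private final TreeMap<String, String> locationsByStartRow = new TreeMap<>();

  public void cacheLocation(String startRow, String serverName) {
    locationsByStartRow.put(startRow, serverName);
  }

  public String locate(String row) {
    Map.Entry<String, String> e = locationsByStartRow.floorEntry(row);
    return e == null ? null : e.getValue();   // null means the client must ask hbase:meta
  }

  public static void main(String[] args) {
    RegionLocationCacheSketch cache = new RegionLocationCacheSketch();
    cache.cacheLocation("row0062", "87fb68174c52,33325,1730982701549");
    System.out.println(cache.locate("row0097"));   // prints the cached server for that region
  }
}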
2024-11-07T12:32:08,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/5c935b76eb404dcc8bac30f9d1c8170f as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/5c935b76eb404dcc8bac30f9d1c8170f
2024-11-07T12:32:08,718 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/5c935b76eb404dcc8bac30f9d1c8170f, entries=7, sequenceid=137, filesize=12.2 K
2024-11-07T12:32:08,718 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 KB/17216 for dc6971b5fdb896c599e17dcb1681742a in 25ms, sequenceid=137, compaction requested=false
2024-11-07T12:32:08,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dc6971b5fdb896c599e17dcb1681742a:
2024-11-07T12:32:08,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33325 {}] regionserver.HRegion(8855): Flush requested on dc6971b5fdb896c599e17dcb1681742a
2024-11-07T12:32:08,720 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dc6971b5fdb896c599e17dcb1681742a 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB
2024-11-07T12:32:08,724 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/5ddc844d42464767b095ab4d7a0dc09d is 1080, key is row0104/info:/1730982728694/Put/seqid=0
2024-11-07T12:32:08,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741857_1033 (size=23316)
2024-11-07T12:32:08,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741857_1033 (size=23316)
2024-11-07T12:32:08,730 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/5ddc844d42464767b095ab4d7a0dc09d
2024-11-07T12:32:08,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/5ddc844d42464767b095ab4d7a0dc09d as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/5ddc844d42464767b095ab4d7a0dc09d
2024-11-07T12:32:08,740 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/5ddc844d42464767b095ab4d7a0dc09d, entries=17, sequenceid=157, filesize=22.8 K
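The "Committing .tmp/... as .../info/..." lines above show the two-step flush: the new HFile is written under the region's .tmp directory and only then moved into the info family directory, so readers never observe a half-written store file. A rough sketch of that commit step follows, assuming a plain FileSystem.rename is enough for illustration (the real HRegionFileSystem performs extra validation around it); names here are hypothetical.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FlushCommitSketch {
  /** Moves a freshly written HFile from the region's .tmp dir into the family dir. */
  public static Path commit(FileSystem fs, Path tmpHFile, Path familyDir) throws IOException {
    Path dst = new Path(familyDir, tmpHFile.getName());  // keep the generated file name
    if (!fs.rename(tmpHFile, dst)) {                      // single metadata operation on HDFS
      throw new IOException("Failed to commit " + tmpHFile + " as " + dst);
    }
    return dst;  // e.g. .../dc6971b5fdb896c599e17dcb1681742a/info/5c935b76eb404dcc8bac30f9d1c8170f
  }
}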
2024-11-07T12:32:08,741 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=8.41 KB/8608 for dc6971b5fdb896c599e17dcb1681742a in 21ms, sequenceid=157, compaction requested=true
2024-11-07T12:32:08,741 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dc6971b5fdb896c599e17dcb1681742a:
2024-11-07T12:32:08,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dc6971b5fdb896c599e17dcb1681742a:info, priority=-2147483648, current under compaction store size is 1
2024-11-07T12:32:08,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-07T12:32:08,741 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-07T12:32:08,742 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 78913 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-07T12:32:08,742 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1541): dc6971b5fdb896c599e17dcb1681742a/info is initiating minor compaction (all files)
2024-11-07T12:32:08,742 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of dc6971b5fdb896c599e17dcb1681742a/info in TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.
2024-11-07T12:32:08,742 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/0de92193a88340359e5bf3a2ad3929cc, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/5c935b76eb404dcc8bac30f9d1c8170f, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/5ddc844d42464767b095ab4d7a0dc09d] into tmpdir=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp, totalSize=77.1 K
2024-11-07T12:32:08,742 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0de92193a88340359e5bf3a2ad3929cc, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1730982714536
2024-11-07T12:32:08,743 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5c935b76eb404dcc8bac30f9d1c8170f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1730982728683
2024-11-07T12:32:08,743 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5ddc844d42464767b095ab4d7a0dc09d, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1730982728694
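The SortedCompactionPolicy/ExploringCompactionPolicy entries above pick three adjacent store files (42.1 K, 12.2 K and 22.8 K, 78913 bytes total) for a minor compaction. The following is a simplified sketch of the kind of "in ratio" test such a policy applies to a candidate run of files; the ratio value, helper names, and the sample sizes in main() are illustrative only and not the exact HBase implementation.

import java.util.List;

public class CompactionRatioSketch {
  // A run of adjacent files is "in ratio" when no single file is larger than
  // ratio * (sum of the other files in the run).
  static boolean filesInRatio(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > (total - size) * ratio) {
        return false;     // one file dominates the run; skip this candidate
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Hypothetical sizes in the same ballpark as the files above; prints true.
    System.out.println(filesInRatio(List.of(40_000L, 12_500L, 23_300L), 1.2));
  }
}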
2024-11-07T12:32:08,754 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dc6971b5fdb896c599e17dcb1681742a#info#compaction#72 average throughput is 30.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-07T12:32:08,754 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/48d81f88be3948738442ee120042758e is 1080, key is row0062/info:/1730982714536/Put/seqid=0
2024-11-07T12:32:08,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741858_1034 (size=69123)
2024-11-07T12:32:08,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741858_1034 (size=69123)
2024-11-07T12:32:08,767 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/48d81f88be3948738442ee120042758e as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/48d81f88be3948738442ee120042758e
2024-11-07T12:32:08,772 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in dc6971b5fdb896c599e17dcb1681742a/info of dc6971b5fdb896c599e17dcb1681742a into 48d81f88be3948738442ee120042758e(size=67.5 K), total size for store is 67.5 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-07T12:32:08,772 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for dc6971b5fdb896c599e17dcb1681742a:
2024-11-07T12:32:08,772 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a., storeName=dc6971b5fdb896c599e17dcb1681742a/info, priority=13, startTime=1730982728741; duration=0sec
2024-11-07T12:32:08,772 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-07T12:32:08,772 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dc6971b5fdb896c599e17dcb1681742a:info
2024-11-07T12:32:08,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-07T12:32:09,072 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-07T12:32:09,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-07T12:32:09,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-07T12:32:10,073 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-07T12:32:10,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-07T12:32:10,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33325 {}] regionserver.HRegion(8855): Flush requested on dc6971b5fdb896c599e17dcb1681742a
2024-11-07T12:32:10,736 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dc6971b5fdb896c599e17dcb1681742a 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB
2024-11-07T12:32:10,741 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/f45299089dbf48f8bbbb82af33116dcd is 1080, key is row0121/info:/1730982728721/Put/seqid=0
2024-11-07T12:32:10,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741859_1035 (size=14672)
2024-11-07T12:32:10,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741859_1035 (size=14672)
2024-11-07T12:32:10,746 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/f45299089dbf48f8bbbb82af33116dcd
2024-11-07T12:32:10,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/f45299089dbf48f8bbbb82af33116dcd as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/f45299089dbf48f8bbbb82af33116dcd
2024-11-07T12:32:10,756 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/f45299089dbf48f8bbbb82af33116dcd, entries=9, sequenceid=170, filesize=14.3 K
2024-11-07T12:32:10,757 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=12.61 KB/12912 for dc6971b5fdb896c599e17dcb1681742a in 21ms, sequenceid=170, compaction requested=false
2024-11-07T12:32:10,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dc6971b5fdb896c599e17dcb1681742a:
2024-11-07T12:32:10,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33325 {}] regionserver.HRegion(8855): Flush requested on dc6971b5fdb896c599e17dcb1681742a
2024-11-07T12:32:10,758 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dc6971b5fdb896c599e17dcb1681742a 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB
2024-11-07T12:32:10,761 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/a8f001cc0e7742d48119fbe8133a5d7d is 1080, key is row0130/info:/1730982730737/Put/seqid=0
2024-11-07T12:32:10,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741860_1036 (size=19000)
2024-11-07T12:32:10,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741860_1036 (size=19000)
2024-11-07T12:32:10,771 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=186 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/a8f001cc0e7742d48119fbe8133a5d7d
2024-11-07T12:32:10,776 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/a8f001cc0e7742d48119fbe8133a5d7d as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/a8f001cc0e7742d48119fbe8133a5d7d
2024-11-07T12:32:10,781 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/a8f001cc0e7742d48119fbe8133a5d7d, entries=13, sequenceid=186, filesize=18.6 K
2024-11-07T12:32:10,782 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=15.76 KB/16140 for dc6971b5fdb896c599e17dcb1681742a in 24ms, sequenceid=186, compaction requested=true
2024-11-07T12:32:10,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dc6971b5fdb896c599e17dcb1681742a:
2024-11-07T12:32:10,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dc6971b5fdb896c599e17dcb1681742a:info, priority=-2147483648, current under compaction store size is 1
2024-11-07T12:32:10,782 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-07T12:32:10,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-07T12:32:10,783 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102795 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-07T12:32:10,783 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1541): dc6971b5fdb896c599e17dcb1681742a/info is initiating minor compaction (all files)
2024-11-07T12:32:10,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33325 {}] regionserver.HRegion(8855): Flush requested on dc6971b5fdb896c599e17dcb1681742a
2024-11-07T12:32:10,783 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of dc6971b5fdb896c599e17dcb1681742a/info in TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.
2024-11-07T12:32:10,783 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/48d81f88be3948738442ee120042758e, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/f45299089dbf48f8bbbb82af33116dcd, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/a8f001cc0e7742d48119fbe8133a5d7d] into tmpdir=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp, totalSize=100.4 K 2024-11-07T12:32:10,783 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dc6971b5fdb896c599e17dcb1681742a 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-07T12:32:10,784 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting 48d81f88be3948738442ee120042758e, keycount=59, bloomtype=ROW, size=67.5 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1730982714536 2024-11-07T12:32:10,784 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting f45299089dbf48f8bbbb82af33116dcd, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1730982728721 2024-11-07T12:32:10,785 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting a8f001cc0e7742d48119fbe8133a5d7d, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1730982730737 2024-11-07T12:32:10,788 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/0fce78b27c944b9ca07e052d654de501 is 1080, key is row0143/info:/1730982730759/Put/seqid=0 2024-11-07T12:32:10,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741861_1037 (size=22238) 2024-11-07T12:32:10,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741861_1037 (size=22238) 2024-11-07T12:32:10,794 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/0fce78b27c944b9ca07e052d654de501 2024-11-07T12:32:10,797 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dc6971b5fdb896c599e17dcb1681742a#info#compaction#76 average throughput is 41.56 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:32:10,798 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/981122f7be43431bb02390fdc8694a2c is 1080, key is row0062/info:/1730982714536/Put/seqid=0 2024-11-07T12:32:10,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/0fce78b27c944b9ca07e052d654de501 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/0fce78b27c944b9ca07e052d654de501 2024-11-07T12:32:10,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741862_1038 (size=93018) 2024-11-07T12:32:10,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741862_1038 (size=93018) 2024-11-07T12:32:10,805 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/0fce78b27c944b9ca07e052d654de501, entries=16, sequenceid=205, filesize=21.7 K 2024-11-07T12:32:10,806 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=2.10 KB/2152 for dc6971b5fdb896c599e17dcb1681742a in 23ms, sequenceid=205, compaction requested=false 2024-11-07T12:32:10,806 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dc6971b5fdb896c599e17dcb1681742a: 2024-11-07T12:32:10,808 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/981122f7be43431bb02390fdc8694a2c as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/981122f7be43431bb02390fdc8694a2c 2024-11-07T12:32:10,814 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in dc6971b5fdb896c599e17dcb1681742a/info of dc6971b5fdb896c599e17dcb1681742a into 981122f7be43431bb02390fdc8694a2c(size=90.8 K), total size for store is 112.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
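The PressureAwareThroughputController lines in this stretch ("average throughput is 41.56 MB/second ... total limit is 50.00 MB/second") indicate that the compaction writer reports bytes as it copies and sleeps whenever its running average exceeds the configured cap. The snippet below is only a minimal sketch of that idea under the assumption of a fixed 50 MB/s limit; the class and method names are invented for illustration and are not the real controller.

// Minimal sketch of a throughput throttle in the spirit of the
// PressureAwareThroughputController messages above: report bytes written and
// sleep whenever the running average rate exceeds the limit.
public class SimpleThroughputThrottle {
    private final double limitBytesPerSec;
    private final long startNanos = System.nanoTime();
    private long totalBytes = 0;
    private long totalSleptMs = 0;

    SimpleThroughputThrottle(double limitBytesPerSec) {
        this.limitBytesPerSec = limitBytesPerSec;
    }

    /** Record bytes written; sleep if the average rate is above the limit. */
    void control(long bytes) throws InterruptedException {
        totalBytes += bytes;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double minSecondsNeeded = totalBytes / limitBytesPerSec;
        long sleepMs = (long) ((minSecondsNeeded - elapsedSec) * 1000);
        if (sleepMs > 0) {
            Thread.sleep(sleepMs);
            totalSleptMs += sleepMs;
        }
    }

    double averageMBPerSec() {
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        return totalBytes / elapsedSec / (1024 * 1024);
    }

    public static void main(String[] args) throws InterruptedException {
        // Cap at 50 MB/s, as in the log's "total limit is 50.00 MB/second".
        SimpleThroughputThrottle t = new SimpleThroughputThrottle(50 * 1024 * 1024);
        for (int i = 0; i < 100; i++) {
            t.control(1024 * 1024); // pretend we just wrote a 1 MB block
        }
        System.out.printf("average throughput is %.2f MB/second, slept %d ms total%n",
                t.averageMBPerSec(), t.totalSleptMs);
    }
}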
2024-11-07T12:32:10,814 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for dc6971b5fdb896c599e17dcb1681742a: 2024-11-07T12:32:10,814 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a., storeName=dc6971b5fdb896c599e17dcb1681742a/info, priority=13, startTime=1730982730782; duration=0sec 2024-11-07T12:32:10,814 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:32:10,814 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dc6971b5fdb896c599e17dcb1681742a:info 2024-11-07T12:32:10,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:32:11,073 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:11,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:11,479 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-07T12:32:11,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:12,074 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:12,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:12,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33325 {}] regionserver.HRegion(8855): Flush requested on dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:32:12,796 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dc6971b5fdb896c599e17dcb1681742a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-07T12:32:12,800 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/8d3840c0035e438f8a8f5b1dccef0ae5 is 1080, key is row0159/info:/1730982730784/Put/seqid=0 2024-11-07T12:32:12,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741863_1039 (size=12516) 2024-11-07T12:32:12,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741863_1039 (size=12516) 2024-11-07T12:32:12,806 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/8d3840c0035e438f8a8f5b1dccef0ae5 2024-11-07T12:32:12,811 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/8d3840c0035e438f8a8f5b1dccef0ae5 as 
hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/8d3840c0035e438f8a8f5b1dccef0ae5 2024-11-07T12:32:12,816 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/8d3840c0035e438f8a8f5b1dccef0ae5, entries=7, sequenceid=216, filesize=12.2 K 2024-11-07T12:32:12,817 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for dc6971b5fdb896c599e17dcb1681742a in 21ms, sequenceid=216, compaction requested=true 2024-11-07T12:32:12,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dc6971b5fdb896c599e17dcb1681742a: 2024-11-07T12:32:12,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33325 {}] regionserver.HRegion(8855): Flush requested on dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:32:12,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dc6971b5fdb896c599e17dcb1681742a:info, priority=-2147483648, current under compaction store size is 1 2024-11-07T12:32:12,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:32:12,817 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T12:32:12,818 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dc6971b5fdb896c599e17dcb1681742a 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-07T12:32:12,818 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 127772 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T12:32:12,819 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1541): dc6971b5fdb896c599e17dcb1681742a/info is initiating minor compaction (all files) 2024-11-07T12:32:12,819 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of dc6971b5fdb896c599e17dcb1681742a/info in TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a. 
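The "Exploring compaction algorithm has selected 3 files of size 127772 ... after considering 1 permutations with 1 in ratio" entry reflects how the exploring policy scans contiguous windows of candidate store files, applies a size-ratio rule (skipped for windows smaller than the minimum compact size, which is why this ~125 KB window passes), and keeps the best window. The code below is a hedged, simplified sketch of that selection loop under stated assumptions (minFiles=3, ratio=1.2, minCompactSize=128 MB); it is not the actual ExploringCompactionPolicy code.

// Hedged sketch of the "exploring" selection idea: scan every contiguous
// window of store-file sizes, keep windows that are "in ratio" (no single
// file dwarfs the rest) unless the window is below the min compact size, and
// prefer the window with the most files, breaking ties by smaller total size.
import java.util.ArrayList;
import java.util.List;

public class ExploringSelectionSketch {
    /** Every file must be <= ratio * (sum of the other files in the window). */
    static boolean inRatio(List<Long> window, double ratio) {
        long total = window.stream().mapToLong(Long::longValue).sum();
        for (long size : window) {
            if (size > (total - size) * ratio) return false;
        }
        return true;
    }

    static List<Long> select(List<Long> files, int minFiles, double ratio, long minCompactSize) {
        List<Long> best = List.of();
        long bestSize = Long.MAX_VALUE;
        int permutations = 0;
        for (int start = 0; start < files.size(); start++) {
            for (int end = start + minFiles; end <= files.size(); end++) {
                List<Long> window = files.subList(start, end);
                permutations++;
                long size = window.stream().mapToLong(Long::longValue).sum();
                // The ratio rule only applies above the min compact size, so
                // small windows like the ~125 KB one in the log always pass.
                if (size >= minCompactSize && !inRatio(window, ratio)) continue;
                if (window.size() > best.size()
                        || (window.size() == best.size() && size < bestSize)) {
                    best = new ArrayList<>(window);
                    bestSize = size;
                }
            }
        }
        System.out.println("considered " + permutations + " permutations");
        return best;
    }

    public static void main(String[] args) {
        // Sizes roughly matching the three store files selected in the log (bytes).
        List<Long> storeFiles = List.of(93018L, 22238L, 12516L);
        List<Long> chosen = select(storeFiles, 3, 1.2, 128L * 1024 * 1024);
        System.out.println("selected " + chosen.size() + " files of size "
                + chosen.stream().mapToLong(Long::longValue).sum());
    }
}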
2024-11-07T12:32:12,819 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/981122f7be43431bb02390fdc8694a2c, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/0fce78b27c944b9ca07e052d654de501, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/8d3840c0035e438f8a8f5b1dccef0ae5] into tmpdir=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp, totalSize=124.8 K 2024-11-07T12:32:12,819 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting 981122f7be43431bb02390fdc8694a2c, keycount=81, bloomtype=ROW, size=90.8 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1730982714536 2024-11-07T12:32:12,820 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0fce78b27c944b9ca07e052d654de501, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1730982730759 2024-11-07T12:32:12,820 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8d3840c0035e438f8a8f5b1dccef0ae5, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1730982730784 2024-11-07T12:32:12,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/d59ac30f46f149d8909961747403a2c2 is 1080, key is row0166/info:/1730982732797/Put/seqid=0 2024-11-07T12:32:12,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741864_1040 (size=19000) 2024-11-07T12:32:12,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741864_1040 (size=19000) 2024-11-07T12:32:12,828 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/d59ac30f46f149d8909961747403a2c2 2024-11-07T12:32:12,833 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dc6971b5fdb896c599e17dcb1681742a#info#compaction#79 average throughput is 35.57 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:32:12,834 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/87ba3626248e435dbaaf2fc62b162b04 is 1080, key is row0062/info:/1730982714536/Put/seqid=0 2024-11-07T12:32:12,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/d59ac30f46f149d8909961747403a2c2 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/d59ac30f46f149d8909961747403a2c2 2024-11-07T12:32:12,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741865_1041 (size=117918) 2024-11-07T12:32:12,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741865_1041 (size=117918) 2024-11-07T12:32:12,839 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/d59ac30f46f149d8909961747403a2c2, entries=13, sequenceid=232, filesize=18.6 K 2024-11-07T12:32:12,841 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=12.61 KB/12912 for dc6971b5fdb896c599e17dcb1681742a in 24ms, sequenceid=232, compaction requested=false 2024-11-07T12:32:12,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dc6971b5fdb896c599e17dcb1681742a: 2024-11-07T12:32:12,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33325 {}] regionserver.HRegion(8855): Flush requested on dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:32:12,841 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dc6971b5fdb896c599e17dcb1681742a 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-07T12:32:12,844 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/87ba3626248e435dbaaf2fc62b162b04 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/87ba3626248e435dbaaf2fc62b162b04 2024-11-07T12:32:12,845 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/05bf492bfd894f7bb93240a34fb7b6e8 is 1080, key is row0179/info:/1730982732818/Put/seqid=0 2024-11-07T12:32:12,849 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in dc6971b5fdb896c599e17dcb1681742a/info of dc6971b5fdb896c599e17dcb1681742a into 
87ba3626248e435dbaaf2fc62b162b04(size=115.2 K), total size for store is 133.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T12:32:12,849 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for dc6971b5fdb896c599e17dcb1681742a: 2024-11-07T12:32:12,849 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a., storeName=dc6971b5fdb896c599e17dcb1681742a/info, priority=13, startTime=1730982732817; duration=0sec 2024-11-07T12:32:12,849 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:32:12,849 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dc6971b5fdb896c599e17dcb1681742a:info 2024-11-07T12:32:12,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741866_1042 (size=19000) 2024-11-07T12:32:12,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741866_1042 (size=19000) 2024-11-07T12:32:12,850 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/05bf492bfd894f7bb93240a34fb7b6e8 2024-11-07T12:32:12,855 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/05bf492bfd894f7bb93240a34fb7b6e8 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/05bf492bfd894f7bb93240a34fb7b6e8 2024-11-07T12:32:12,859 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/05bf492bfd894f7bb93240a34fb7b6e8, entries=13, sequenceid=248, filesize=18.6 K 2024-11-07T12:32:12,860 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=1.05 KB/1076 for dc6971b5fdb896c599e17dcb1681742a in 19ms, sequenceid=248, compaction requested=true 2024-11-07T12:32:12,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dc6971b5fdb896c599e17dcb1681742a: 2024-11-07T12:32:12,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dc6971b5fdb896c599e17dcb1681742a:info, priority=-2147483648, current under compaction store size is 1 2024-11-07T12:32:12,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:32:12,860 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T12:32:12,861 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 155918 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T12:32:12,861 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1541): dc6971b5fdb896c599e17dcb1681742a/info is initiating minor compaction (all files) 2024-11-07T12:32:12,861 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of dc6971b5fdb896c599e17dcb1681742a/info in TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a. 2024-11-07T12:32:12,861 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/87ba3626248e435dbaaf2fc62b162b04, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/d59ac30f46f149d8909961747403a2c2, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/05bf492bfd894f7bb93240a34fb7b6e8] into tmpdir=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp, totalSize=152.3 K 2024-11-07T12:32:12,861 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting 87ba3626248e435dbaaf2fc62b162b04, keycount=104, bloomtype=ROW, size=115.2 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1730982714536 2024-11-07T12:32:12,861 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting d59ac30f46f149d8909961747403a2c2, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1730982732797 2024-11-07T12:32:12,862 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting 05bf492bfd894f7bb93240a34fb7b6e8, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1730982732818 2024-11-07T12:32:12,871 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dc6971b5fdb896c599e17dcb1681742a#info#compaction#81 average throughput is 66.70 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:32:12,872 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/37b2c32480fd4f9490a008934f273322 is 1080, key is row0062/info:/1730982714536/Put/seqid=0 2024-11-07T12:32:12,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741867_1043 (size=146253) 2024-11-07T12:32:12,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741867_1043 (size=146253) 2024-11-07T12:32:12,880 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/37b2c32480fd4f9490a008934f273322 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/37b2c32480fd4f9490a008934f273322 2024-11-07T12:32:12,885 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in dc6971b5fdb896c599e17dcb1681742a/info of dc6971b5fdb896c599e17dcb1681742a into 37b2c32480fd4f9490a008934f273322(size=142.8 K), total size for store is 142.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T12:32:12,885 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for dc6971b5fdb896c599e17dcb1681742a: 2024-11-07T12:32:12,885 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a., storeName=dc6971b5fdb896c599e17dcb1681742a/info, priority=13, startTime=1730982732860; duration=0sec 2024-11-07T12:32:12,885 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:32:12,885 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dc6971b5fdb896c599e17dcb1681742a:info 2024-11-07T12:32:12,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:13,074 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:13,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:13,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:14,075 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:14,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:32:14,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33325 {}] regionserver.HRegion(8855): Flush requested on dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:32:14,853 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dc6971b5fdb896c599e17dcb1681742a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-07T12:32:14,857 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/d9428265eb20441d8a13b7b2198e7a14 is 1080, key is row0192/info:/1730982732842/Put/seqid=0 2024-11-07T12:32:14,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741868_1044 (size=12520) 2024-11-07T12:32:14,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741868_1044 (size=12520) 2024-11-07T12:32:14,863 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/d9428265eb20441d8a13b7b2198e7a14 2024-11-07T12:32:14,869 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/d9428265eb20441d8a13b7b2198e7a14 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/d9428265eb20441d8a13b7b2198e7a14 2024-11-07T12:32:14,873 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/d9428265eb20441d8a13b7b2198e7a14, entries=7, sequenceid=260, filesize=12.2 K 2024-11-07T12:32:14,874 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for dc6971b5fdb896c599e17dcb1681742a in 21ms, sequenceid=260, compaction requested=false 2024-11-07T12:32:14,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dc6971b5fdb896c599e17dcb1681742a: 2024-11-07T12:32:14,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33325 {}] regionserver.HRegion(8855): Flush requested on dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:32:14,876 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dc6971b5fdb896c599e17dcb1681742a 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-07T12:32:14,879 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/8bb9e7381de04a4485a1a51cdae57785 is 1080, key is row0199/info:/1730982734854/Put/seqid=0 2024-11-07T12:32:14,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to 
blk_1073741869_1045 (size=21171) 2024-11-07T12:32:14,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741869_1045 (size=21171) 2024-11-07T12:32:14,885 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/8bb9e7381de04a4485a1a51cdae57785 2024-11-07T12:32:14,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:32:14,890 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/8bb9e7381de04a4485a1a51cdae57785 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/8bb9e7381de04a4485a1a51cdae57785 2024-11-07T12:32:14,894 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/8bb9e7381de04a4485a1a51cdae57785, entries=15, sequenceid=278, filesize=20.7 K 2024-11-07T12:32:14,895 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=11.56 KB/11836 for dc6971b5fdb896c599e17dcb1681742a in 20ms, sequenceid=278, compaction requested=true 2024-11-07T12:32:14,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dc6971b5fdb896c599e17dcb1681742a: 2024-11-07T12:32:14,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dc6971b5fdb896c599e17dcb1681742a:info, priority=-2147483648, current under compaction store size is 1 2024-11-07T12:32:14,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:32:14,895 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T12:32:14,896 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 179944 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T12:32:14,896 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1541): dc6971b5fdb896c599e17dcb1681742a/info is initiating minor compaction (all files) 2024-11-07T12:32:14,896 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of dc6971b5fdb896c599e17dcb1681742a/info in TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a. 
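(Editor's note: the flush entries just above — HFileWriterImpl writing under .tmp/info, then HRegionFileSystem(442) "Committing .tmp/info/... as info/..." — show the write-then-rename commit pattern: the flusher writes the new HFile into the region's .tmp directory and only renames it into the column-family directory once the write is complete, so readers never see a partial file. Below is a minimal sketch of that pattern using the plain Hadoop FileSystem API; the directory layout, file name and payload are hypothetical and this is not the actual HRegionFileSystem code.)

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * Sketch of the "write to .tmp, then rename into place" commit step described
 * by the HRegionFileSystem log lines above. Paths are made up for illustration.
 */
public class TmpCommitSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path regionDir = new Path("/tmp/commit-sketch/region");       // stand-in for the region directory
    Path tmpFile   = new Path(regionDir, ".tmp/info/flush-0001");  // flush output lands here first
    Path storeFile = new Path(regionDir, "info/flush-0001");       // final, reader-visible location

    // 1. Write the flushed data to the temporary location.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.writeBytes("flushed cells would go here\n");
    }

    // 2. Commit: ensure the store directory exists, then rename (atomic on HDFS).
    fs.mkdirs(storeFile.getParent());
    if (!fs.rename(tmpFile, storeFile)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + storeFile);
    }
    System.out.println("Committed " + storeFile + " ("
        + fs.getFileStatus(storeFile).getLen() + " bytes)");
  }
}
```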
2024-11-07T12:32:14,896 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/37b2c32480fd4f9490a008934f273322, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/d9428265eb20441d8a13b7b2198e7a14, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/8bb9e7381de04a4485a1a51cdae57785] into tmpdir=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp, totalSize=175.7 K 2024-11-07T12:32:14,897 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting 37b2c32480fd4f9490a008934f273322, keycount=130, bloomtype=ROW, size=142.8 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1730982714536 2024-11-07T12:32:14,897 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting d9428265eb20441d8a13b7b2198e7a14, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1730982732842 2024-11-07T12:32:14,897 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8bb9e7381de04a4485a1a51cdae57785, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1730982734854 2024-11-07T12:32:14,907 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dc6971b5fdb896c599e17dcb1681742a#info#compaction#84 average throughput is 51.99 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:32:14,907 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/731bcac1eb70444cab6c07d77d69b473 is 1080, key is row0062/info:/1730982714536/Put/seqid=0 2024-11-07T12:32:14,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741870_1046 (size=170094) 2024-11-07T12:32:14,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741870_1046 (size=170094) 2024-11-07T12:32:14,915 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/731bcac1eb70444cab6c07d77d69b473 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/731bcac1eb70444cab6c07d77d69b473 2024-11-07T12:32:14,920 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in dc6971b5fdb896c599e17dcb1681742a/info of dc6971b5fdb896c599e17dcb1681742a into 731bcac1eb70444cab6c07d77d69b473(size=166.1 K), total size for store is 166.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T12:32:14,920 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for dc6971b5fdb896c599e17dcb1681742a: 2024-11-07T12:32:14,920 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a., storeName=dc6971b5fdb896c599e17dcb1681742a/info, priority=13, startTime=1730982734895; duration=0sec 2024-11-07T12:32:14,921 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:32:14,921 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dc6971b5fdb896c599e17dcb1681742a:info 2024-11-07T12:32:15,075 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:15,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:15,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:16,075 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:16,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:16,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:32:16,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33325 {}] regionserver.HRegion(8855): Flush requested on dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:32:16,894 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dc6971b5fdb896c599e17dcb1681742a 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-07T12:32:16,898 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/a60058c8aebb44b090930dabb6d7cb35 is 1080, key is row0214/info:/1730982734876/Put/seqid=0 2024-11-07T12:32:16,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741871_1047 (size=17918) 2024-11-07T12:32:16,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741871_1047 (size=17918) 2024-11-07T12:32:16,904 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/a60058c8aebb44b090930dabb6d7cb35 2024-11-07T12:32:16,909 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/a60058c8aebb44b090930dabb6d7cb35 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/a60058c8aebb44b090930dabb6d7cb35 2024-11-07T12:32:16,914 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/a60058c8aebb44b090930dabb6d7cb35, entries=12, sequenceid=294, filesize=17.5 K 2024-11-07T12:32:16,915 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=13.66 KB/13988 for dc6971b5fdb896c599e17dcb1681742a in 20ms, sequenceid=294, compaction requested=false 2024-11-07T12:32:16,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dc6971b5fdb896c599e17dcb1681742a: 2024-11-07T12:32:16,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33325 {}] regionserver.HRegion(8855): Flush requested on dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:32:16,916 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dc6971b5fdb896c599e17dcb1681742a 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-07T12:32:16,919 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/347d80181b6341bfbf745434020ded32 is 1080, key is row0226/info:/1730982736895/Put/seqid=0 2024-11-07T12:32:16,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is 
added to blk_1073741872_1048 (size=20092) 2024-11-07T12:32:16,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741872_1048 (size=20092) 2024-11-07T12:32:16,925 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/347d80181b6341bfbf745434020ded32 2024-11-07T12:32:16,930 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/347d80181b6341bfbf745434020ded32 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/347d80181b6341bfbf745434020ded32 2024-11-07T12:32:16,934 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/347d80181b6341bfbf745434020ded32, entries=14, sequenceid=311, filesize=19.6 K 2024-11-07T12:32:16,935 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for dc6971b5fdb896c599e17dcb1681742a in 20ms, sequenceid=311, compaction requested=true 2024-11-07T12:32:16,935 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dc6971b5fdb896c599e17dcb1681742a: 2024-11-07T12:32:16,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dc6971b5fdb896c599e17dcb1681742a:info, priority=-2147483648, current under compaction store size is 1 2024-11-07T12:32:16,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:32:16,935 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T12:32:16,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33325 {}] regionserver.HRegion(8855): Flush requested on dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:32:16,936 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dc6971b5fdb896c599e17dcb1681742a 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-07T12:32:16,936 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 208104 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T12:32:16,936 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1541): dc6971b5fdb896c599e17dcb1681742a/info is initiating minor compaction (all files) 2024-11-07T12:32:16,936 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of dc6971b5fdb896c599e17dcb1681742a/info in TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a. 
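(Editor's note: the ExploringCompactionPolicy(116) line above reports a candidate set being "in ratio". The core idea is that a group of store files only makes a good minor compaction if no single file dwarfs the rest. The sketch below shows that ratio test in isolation; it is a simplification with made-up sizes, not the real policy, which also searches permutations, enforces min/max file counts and size limits, and can accept an all-files selection regardless of the ratio.)

```java
import java.util.Arrays;
import java.util.List;

/**
 * Simplified "in ratio" check in the spirit of the exploring compaction policy:
 * every file must be at most `ratio` times the combined size of the others.
 */
public class RatioCheckSketch {

  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;   // one file dominates the selection
      }
    }
    return true;
  }

  public static void main(String[] args) {
    double ratio = 1.2; // typical default, exposed as hbase.hstore.compaction.ratio
    // Hypothetical sizes: three similar flush files pass, one huge file plus two small ones do not.
    System.out.println(filesInRatio(Arrays.asList(12_000L, 15_000L, 20_000L), ratio));   // true
    System.out.println(filesInRatio(Arrays.asList(170_000L, 18_000L, 20_000L), ratio));  // false
  }
}
```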
2024-11-07T12:32:16,936 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/731bcac1eb70444cab6c07d77d69b473, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/a60058c8aebb44b090930dabb6d7cb35, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/347d80181b6341bfbf745434020ded32] into tmpdir=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp, totalSize=203.2 K 2024-11-07T12:32:16,937 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting 731bcac1eb70444cab6c07d77d69b473, keycount=152, bloomtype=ROW, size=166.1 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1730982714536 2024-11-07T12:32:16,937 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting a60058c8aebb44b090930dabb6d7cb35, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1730982734876 2024-11-07T12:32:16,937 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] compactions.Compactor(225): Compacting 347d80181b6341bfbf745434020ded32, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1730982736895 2024-11-07T12:32:16,939 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/54a90e1319164948baccf37b1c218b51 is 1080, key is row0240/info:/1730982736916/Put/seqid=0 2024-11-07T12:32:16,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741873_1049 (size=19013) 2024-11-07T12:32:16,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741873_1049 (size=19013) 2024-11-07T12:32:16,945 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/54a90e1319164948baccf37b1c218b51 2024-11-07T12:32:16,949 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dc6971b5fdb896c599e17dcb1681742a#info#compaction#88 average throughput is 60.89 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:32:16,949 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/b810996a1eb1479e8d7f090c0ec97d07 is 1080, key is row0062/info:/1730982714536/Put/seqid=0 2024-11-07T12:32:16,950 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/54a90e1319164948baccf37b1c218b51 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/54a90e1319164948baccf37b1c218b51 2024-11-07T12:32:16,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741874_1050 (size=198258) 2024-11-07T12:32:16,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741874_1050 (size=198258) 2024-11-07T12:32:16,955 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/54a90e1319164948baccf37b1c218b51, entries=13, sequenceid=327, filesize=18.6 K 2024-11-07T12:32:16,956 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=4.20 KB/4304 for dc6971b5fdb896c599e17dcb1681742a in 20ms, sequenceid=327, compaction requested=false 2024-11-07T12:32:16,956 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dc6971b5fdb896c599e17dcb1681742a: 2024-11-07T12:32:16,957 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/b810996a1eb1479e8d7f090c0ec97d07 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/b810996a1eb1479e8d7f090c0ec97d07 2024-11-07T12:32:16,962 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in dc6971b5fdb896c599e17dcb1681742a/info of dc6971b5fdb896c599e17dcb1681742a into b810996a1eb1479e8d7f090c0ec97d07(size=193.6 K), total size for store is 212.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
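(Editor's note: the PressureAwareThroughputController(145) message above reports the compaction's average throughput against a 50.00 MB/second limit and how long it slept to stay under it. The sketch below shows the core pacing idea only — after each chunk, sleep long enough that the cumulative rate stays at or below the cap. It is not the real controller, which adjusts the limit with store pressure and only checks at a control granularity; the numbers are hypothetical.)

```java
/**
 * Minimal pacing limiter: keep the observed write rate at or below a cap by
 * sleeping after each chunk when we are running ahead of schedule.
 */
public class ThroughputLimiterSketch {
  private final double limitBytesPerSec;
  private final long startNanos = System.nanoTime();
  private long totalBytes;

  ThroughputLimiterSketch(double limitBytesPerSec) {
    this.limitBytesPerSec = limitBytesPerSec;
  }

  /** Record `bytes` just written and sleep if the overall rate exceeds the limit. */
  void control(long bytes) throws InterruptedException {
    totalBytes += bytes;
    double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
    double minimumSec = totalBytes / limitBytesPerSec;   // time this much data should take at the cap
    long sleepMs = (long) ((minimumSec - elapsedSec) * 1000);
    if (sleepMs > 0) {
      Thread.sleep(sleepMs);
    }
  }

  public static void main(String[] args) throws InterruptedException {
    // 50 MB/s cap, mirroring the "total limit is 50.00 MB/second" message above.
    ThroughputLimiterSketch limiter = new ThroughputLimiterSketch(50.0 * 1024 * 1024);
    for (int i = 0; i < 10; i++) {
      limiter.control(16L * 1024 * 1024);   // pretend we just wrote 16 MB of compaction output
    }
    System.out.println("wrote 160 MB under a 50 MB/s cap");
  }
}
```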
2024-11-07T12:32:16,962 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for dc6971b5fdb896c599e17dcb1681742a: 2024-11-07T12:32:16,962 INFO [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a., storeName=dc6971b5fdb896c599e17dcb1681742a/info, priority=13, startTime=1730982736935; duration=0sec 2024-11-07T12:32:16,962 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:32:16,962 DEBUG [RS:0;87fb68174c52:33325-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dc6971b5fdb896c599e17dcb1681742a:info 2024-11-07T12:32:17,076 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:32:17,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:17,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:18,076 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:32:18,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:18,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:18,942 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-07T12:32:18,943 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C33325%2C1730982701549.1730982738943 2024-11-07T12:32:18,948 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:18,948 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:18,948 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:18,948 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:18,948 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:18,948 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/WALs/87fb68174c52,33325,1730982701549/87fb68174c52%2C33325%2C1730982701549.1730982701927 with entries=312, filesize=308.51 KB; new WAL /user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/WALs/87fb68174c52,33325,1730982701549/87fb68174c52%2C33325%2C1730982701549.1730982738943 2024-11-07T12:32:18,949 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46835:46835),(127.0.0.1/127.0.0.1:37931:37931)] 2024-11-07T12:32:18,949 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/WALs/87fb68174c52,33325,1730982701549/87fb68174c52%2C33325%2C1730982701549.1730982701927 is not closed yet, will try archiving it next time 2024-11-07T12:32:18,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741833_1009 (size=315921) 2024-11-07T12:32:18,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741833_1009 (size=315921) 2024-11-07T12:32:18,952 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing dc6971b5fdb896c599e17dcb1681742a 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-07T12:32:18,956 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/376a353a3cfd4dce9d70a22edce8c846 is 1080, key is row0253/info:/1730982736936/Put/seqid=0 2024-11-07T12:32:18,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741876_1052 (size=9278) 2024-11-07T12:32:18,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741876_1052 (size=9278) 2024-11-07T12:32:18,960 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/376a353a3cfd4dce9d70a22edce8c846 2024-11-07T12:32:18,965 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/.tmp/info/376a353a3cfd4dce9d70a22edce8c846 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/376a353a3cfd4dce9d70a22edce8c846 2024-11-07T12:32:18,969 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/376a353a3cfd4dce9d70a22edce8c846, entries=4, sequenceid=335, filesize=9.1 K 2024-11-07T12:32:18,970 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for dc6971b5fdb896c599e17dcb1681742a in 18ms, sequenceid=335, compaction requested=true 2024-11-07T12:32:18,970 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for dc6971b5fdb896c599e17dcb1681742a: 2024-11-07T12:32:18,970 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-07T12:32:18,974 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740/.tmp/info/2ee1e08ed9034abe9904b8d63f2ecb4c is 193, key is TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a./info:regioninfo/1730982717311/Put/seqid=0 2024-11-07T12:32:18,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741877_1053 (size=6223) 2024-11-07T12:32:18,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741877_1053 (size=6223) 2024-11-07T12:32:18,979 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740/.tmp/info/2ee1e08ed9034abe9904b8d63f2ecb4c 2024-11-07T12:32:18,983 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740/.tmp/info/2ee1e08ed9034abe9904b8d63f2ecb4c as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740/info/2ee1e08ed9034abe9904b8d63f2ecb4c 2024-11-07T12:32:18,987 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740/info/2ee1e08ed9034abe9904b8d63f2ecb4c, entries=5, sequenceid=21, filesize=6.1 K 2024-11-07T12:32:18,988 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 18ms, sequenceid=21, compaction requested=false 2024-11-07T12:32:18,988 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-07T12:32:18,988 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 106fd831981b255f7952714d962d8fb9: 2024-11-07T12:32:18,988 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C33325%2C1730982701549.1730982738988 2024-11-07T12:32:18,992 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:18,992 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:18,992 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:18,992 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:18,992 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:18,993 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/WALs/87fb68174c52,33325,1730982701549/87fb68174c52%2C33325%2C1730982701549.1730982738943 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/WALs/87fb68174c52,33325,1730982701549/87fb68174c52%2C33325%2C1730982701549.1730982738988 2024-11-07T12:32:18,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741875_1051 (size=731) 2024-11-07T12:32:18,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741875_1051 (size=731) 2024-11-07T12:32:18,997 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/WALs/87fb68174c52,33325,1730982701549/87fb68174c52%2C33325%2C1730982701549.1730982701927 to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/oldWALs/87fb68174c52%2C33325%2C1730982701549.1730982701927 2024-11-07T12:32:18,998 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/WALs/87fb68174c52,33325,1730982701549/87fb68174c52%2C33325%2C1730982701549.1730982738943 to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/oldWALs/87fb68174c52%2C33325%2C1730982701549.1730982738943 2024-11-07T12:32:19,000 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37931:37931),(127.0.0.1/127.0.0.1:46835:46835)] 2024-11-07T12:32:19,001 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 
2024-11-07T12:32:19,001 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-07T12:32:19,001 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-07T12:32:19,001 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:32:19,001 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:32:19,001 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-11-07T12:32:19,001 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-07T12:32:19,001 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-07T12:32:19,001 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1280411139, stopped=false 2024-11-07T12:32:19,001 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=87fb68174c52,37673,1730982701497 2024-11-07T12:32:19,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:32:19,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33325-0x1001a39f4470001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:32:19,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:32:19,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33325-0x1001a39f4470001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:32:19,003 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:32:19,003 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-07T12:32:19,003 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:32:19,004 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:32:19,004 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '87fb68174c52,33325,1730982701549' ***** 2024-11-07T12:32:19,004 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-07T12:32:19,004 INFO [RS:0;87fb68174c52:33325 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-07T12:32:19,004 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-07T12:32:19,004 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:32:19,004 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33325-0x1001a39f4470001, quorum=127.0.0.1:62282, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:32:19,004 INFO [RS:0;87fb68174c52:33325 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-07T12:32:19,004 INFO [RS:0;87fb68174c52:33325 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-07T12:32:19,005 INFO [RS:0;87fb68174c52:33325 {}] regionserver.HRegionServer(3091): Received CLOSE for dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:32:19,005 INFO [RS:0;87fb68174c52:33325 {}] regionserver.HRegionServer(3091): Received CLOSE for 106fd831981b255f7952714d962d8fb9 2024-11-07T12:32:19,005 INFO [RS:0;87fb68174c52:33325 {}] regionserver.HRegionServer(959): stopping server 87fb68174c52,33325,1730982701549 2024-11-07T12:32:19,005 INFO [RS:0;87fb68174c52:33325 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:32:19,005 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing dc6971b5fdb896c599e17dcb1681742a, disabling compactions & flushes 2024-11-07T12:32:19,005 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a. 2024-11-07T12:32:19,005 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a. 2024-11-07T12:32:19,005 INFO [RS:0;87fb68174c52:33325 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;87fb68174c52:33325. 2024-11-07T12:32:19,005 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a. after waiting 0 ms 2024-11-07T12:32:19,005 DEBUG [RS:0;87fb68174c52:33325 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:32:19,005 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a. 
2024-11-07T12:32:19,005 DEBUG [RS:0;87fb68174c52:33325 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:32:19,005 INFO [RS:0;87fb68174c52:33325 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-07T12:32:19,005 INFO [RS:0;87fb68174c52:33325 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-07T12:32:19,005 INFO [RS:0;87fb68174c52:33325 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-07T12:32:19,005 INFO [RS:0;87fb68174c52:33325 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-07T12:32:19,006 INFO [RS:0;87fb68174c52:33325 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-07T12:32:19,006 DEBUG [RS:0;87fb68174c52:33325 {}] regionserver.HRegionServer(1325): Online Regions={dc6971b5fdb896c599e17dcb1681742a=TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a., 1588230740=hbase:meta,,1.1588230740, 106fd831981b255f7952714d962d8fb9=TestLogRolling-testLogRolling,,1730982716618.106fd831981b255f7952714d962d8fb9.} 2024-11-07T12:32:19,006 DEBUG [RS:0;87fb68174c52:33325 {}] regionserver.HRegionServer(1351): Waiting on 106fd831981b255f7952714d962d8fb9, 1588230740, dc6971b5fdb896c599e17dcb1681742a 2024-11-07T12:32:19,007 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:32:19,007 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:32:19,007 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:32:19,007 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:32:19,007 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:32:19,007 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/3efa58aa2e4146bc8c97c59d8e485173.ae2d13e7df9673fc8ab06b452d0d84b0->hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/3efa58aa2e4146bc8c97c59d8e485173-top, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/TestLogRolling-testLogRolling=ae2d13e7df9673fc8ab06b452d0d84b0-c6a1465e426c406594d245e77fbc8e84, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/TestLogRolling-testLogRolling=ae2d13e7df9673fc8ab06b452d0d84b0-40c5caa86ca64f2eb26027db7fdde0d0, 
hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/0de92193a88340359e5bf3a2ad3929cc, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/TestLogRolling-testLogRolling=ae2d13e7df9673fc8ab06b452d0d84b0-c30e7b4010f3414890cba0daff6950b7, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/5c935b76eb404dcc8bac30f9d1c8170f, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/48d81f88be3948738442ee120042758e, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/5ddc844d42464767b095ab4d7a0dc09d, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/f45299089dbf48f8bbbb82af33116dcd, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/981122f7be43431bb02390fdc8694a2c, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/a8f001cc0e7742d48119fbe8133a5d7d, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/0fce78b27c944b9ca07e052d654de501, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/87ba3626248e435dbaaf2fc62b162b04, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/8d3840c0035e438f8a8f5b1dccef0ae5, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/d59ac30f46f149d8909961747403a2c2, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/37b2c32480fd4f9490a008934f273322, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/05bf492bfd894f7bb93240a34fb7b6e8, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/d9428265eb20441d8a13b7b2198e7a14, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/731bcac1eb70444cab6c07d77d69b473, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/8bb9e7381de04a4485a1a51cdae57785, 
hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/a60058c8aebb44b090930dabb6d7cb35, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/347d80181b6341bfbf745434020ded32] to archive 2024-11-07T12:32:19,008 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T12:32:19,009 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/3efa58aa2e4146bc8c97c59d8e485173.ae2d13e7df9673fc8ab06b452d0d84b0 to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/3efa58aa2e4146bc8c97c59d8e485173.ae2d13e7df9673fc8ab06b452d0d84b0 2024-11-07T12:32:19,011 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/TestLogRolling-testLogRolling=ae2d13e7df9673fc8ab06b452d0d84b0-c6a1465e426c406594d245e77fbc8e84 to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/TestLogRolling-testLogRolling=ae2d13e7df9673fc8ab06b452d0d84b0-c6a1465e426c406594d245e77fbc8e84 2024-11-07T12:32:19,012 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-07T12:32:19,012 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/TestLogRolling-testLogRolling=ae2d13e7df9673fc8ab06b452d0d84b0-40c5caa86ca64f2eb26027db7fdde0d0 to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/TestLogRolling-testLogRolling=ae2d13e7df9673fc8ab06b452d0d84b0-40c5caa86ca64f2eb26027db7fdde0d0 2024-11-07T12:32:19,012 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T12:32:19,013 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:32:19,013 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 
1588230740: Waiting for close lock at 1730982739007Running coprocessor pre-close hooks at 1730982739007Disabling compacts and flushes for region at 1730982739007Disabling writes for close at 1730982739007Writing region close event to WAL at 1730982739009 (+2 ms)Running coprocessor post-close hooks at 1730982739012 (+3 ms)Closed at 1730982739013 (+1 ms) 2024-11-07T12:32:19,013 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-07T12:32:19,013 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/0de92193a88340359e5bf3a2ad3929cc to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/0de92193a88340359e5bf3a2ad3929cc 2024-11-07T12:32:19,014 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/TestLogRolling-testLogRolling=ae2d13e7df9673fc8ab06b452d0d84b0-c30e7b4010f3414890cba0daff6950b7 to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/TestLogRolling-testLogRolling=ae2d13e7df9673fc8ab06b452d0d84b0-c30e7b4010f3414890cba0daff6950b7 2024-11-07T12:32:19,015 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/5c935b76eb404dcc8bac30f9d1c8170f to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/5c935b76eb404dcc8bac30f9d1c8170f 2024-11-07T12:32:19,016 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/48d81f88be3948738442ee120042758e to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/48d81f88be3948738442ee120042758e 2024-11-07T12:32:19,017 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/5ddc844d42464767b095ab4d7a0dc09d to 
hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/5ddc844d42464767b095ab4d7a0dc09d 2024-11-07T12:32:19,018 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/f45299089dbf48f8bbbb82af33116dcd to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/f45299089dbf48f8bbbb82af33116dcd 2024-11-07T12:32:19,019 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/981122f7be43431bb02390fdc8694a2c to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/981122f7be43431bb02390fdc8694a2c 2024-11-07T12:32:19,020 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/a8f001cc0e7742d48119fbe8133a5d7d to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/a8f001cc0e7742d48119fbe8133a5d7d 2024-11-07T12:32:19,021 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/0fce78b27c944b9ca07e052d654de501 to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/0fce78b27c944b9ca07e052d654de501 2024-11-07T12:32:19,022 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/87ba3626248e435dbaaf2fc62b162b04 to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/87ba3626248e435dbaaf2fc62b162b04 2024-11-07T12:32:19,023 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/8d3840c0035e438f8a8f5b1dccef0ae5 to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/8d3840c0035e438f8a8f5b1dccef0ae5 2024-11-07T12:32:19,024 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/d59ac30f46f149d8909961747403a2c2 to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/d59ac30f46f149d8909961747403a2c2 2024-11-07T12:32:19,025 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/37b2c32480fd4f9490a008934f273322 to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/37b2c32480fd4f9490a008934f273322 2024-11-07T12:32:19,026 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/05bf492bfd894f7bb93240a34fb7b6e8 to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/05bf492bfd894f7bb93240a34fb7b6e8 2024-11-07T12:32:19,027 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/d9428265eb20441d8a13b7b2198e7a14 to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/d9428265eb20441d8a13b7b2198e7a14 2024-11-07T12:32:19,028 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/731bcac1eb70444cab6c07d77d69b473 to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/731bcac1eb70444cab6c07d77d69b473 2024-11-07T12:32:19,029 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/8bb9e7381de04a4485a1a51cdae57785 to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/8bb9e7381de04a4485a1a51cdae57785 2024-11-07T12:32:19,030 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/a60058c8aebb44b090930dabb6d7cb35 to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/a60058c8aebb44b090930dabb6d7cb35 2024-11-07T12:32:19,031 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/347d80181b6341bfbf745434020ded32 to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/info/347d80181b6341bfbf745434020ded32 2024-11-07T12:32:19,031 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=87fb68174c52:37673 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-07T12:32:19,031 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [0de92193a88340359e5bf3a2ad3929cc=43081, 5c935b76eb404dcc8bac30f9d1c8170f=12516, 48d81f88be3948738442ee120042758e=69123, 5ddc844d42464767b095ab4d7a0dc09d=23316, f45299089dbf48f8bbbb82af33116dcd=14672, 981122f7be43431bb02390fdc8694a2c=93018, a8f001cc0e7742d48119fbe8133a5d7d=19000, 0fce78b27c944b9ca07e052d654de501=22238, 87ba3626248e435dbaaf2fc62b162b04=117918, 8d3840c0035e438f8a8f5b1dccef0ae5=12516, d59ac30f46f149d8909961747403a2c2=19000, 37b2c32480fd4f9490a008934f273322=146253, 05bf492bfd894f7bb93240a34fb7b6e8=19000, d9428265eb20441d8a13b7b2198e7a14=12520, 731bcac1eb70444cab6c07d77d69b473=170094, 8bb9e7381de04a4485a1a51cdae57785=21171, a60058c8aebb44b090930dabb6d7cb35=17918, 347d80181b6341bfbf745434020ded32=20092] 2024-11-07T12:32:19,034 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/dc6971b5fdb896c599e17dcb1681742a/recovered.edits/338.seqid, newMaxSeqId=338, maxSeqId=126 2024-11-07T12:32:19,035 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a. 2024-11-07T12:32:19,035 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for dc6971b5fdb896c599e17dcb1681742a: Waiting for close lock at 1730982739005Running coprocessor pre-close hooks at 1730982739005Disabling compacts and flushes for region at 1730982739005Disabling writes for close at 1730982739005Writing region close event to WAL at 1730982739032 (+27 ms)Running coprocessor post-close hooks at 1730982739035 (+3 ms)Closed at 1730982739035 2024-11-07T12:32:19,035 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1730982716618.dc6971b5fdb896c599e17dcb1681742a. 2024-11-07T12:32:19,035 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 106fd831981b255f7952714d962d8fb9, disabling compactions & flushes 2024-11-07T12:32:19,035 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1730982716618.106fd831981b255f7952714d962d8fb9. 2024-11-07T12:32:19,035 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1730982716618.106fd831981b255f7952714d962d8fb9. 2024-11-07T12:32:19,035 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1730982716618.106fd831981b255f7952714d962d8fb9. after waiting 0 ms 2024-11-07T12:32:19,035 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1730982716618.106fd831981b255f7952714d962d8fb9. 
2024-11-07T12:32:19,035 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1730982716618.106fd831981b255f7952714d962d8fb9.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/106fd831981b255f7952714d962d8fb9/info/3efa58aa2e4146bc8c97c59d8e485173.ae2d13e7df9673fc8ab06b452d0d84b0->hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/ae2d13e7df9673fc8ab06b452d0d84b0/info/3efa58aa2e4146bc8c97c59d8e485173-bottom] to archive 2024-11-07T12:32:19,036 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1730982716618.106fd831981b255f7952714d962d8fb9.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T12:32:19,037 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1730982716618.106fd831981b255f7952714d962d8fb9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/106fd831981b255f7952714d962d8fb9/info/3efa58aa2e4146bc8c97c59d8e485173.ae2d13e7df9673fc8ab06b452d0d84b0 to hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/archive/data/default/TestLogRolling-testLogRolling/106fd831981b255f7952714d962d8fb9/info/3efa58aa2e4146bc8c97c59d8e485173.ae2d13e7df9673fc8ab06b452d0d84b0 2024-11-07T12:32:19,037 WARN [StoreCloser-TestLogRolling-testLogRolling,,1730982716618.106fd831981b255f7952714d962d8fb9.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-07T12:32:19,040 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/data/default/TestLogRolling-testLogRolling/106fd831981b255f7952714d962d8fb9/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-11-07T12:32:19,041 INFO [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1730982716618.106fd831981b255f7952714d962d8fb9. 2024-11-07T12:32:19,041 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 106fd831981b255f7952714d962d8fb9: Waiting for close lock at 1730982739035Running coprocessor pre-close hooks at 1730982739035Disabling compacts and flushes for region at 1730982739035Disabling writes for close at 1730982739035Writing region close event to WAL at 1730982739038 (+3 ms)Running coprocessor post-close hooks at 1730982739041 (+3 ms)Closed at 1730982739041 2024-11-07T12:32:19,041 DEBUG [RS_CLOSE_REGION-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1730982716618.106fd831981b255f7952714d962d8fb9. 2024-11-07T12:32:19,077 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:19,207 INFO [RS:0;87fb68174c52:33325 {}] regionserver.HRegionServer(976): stopping server 87fb68174c52,33325,1730982701549; all regions closed. 
2024-11-07T12:32:19,207 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:19,207 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:19,207 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:19,207 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:19,208 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:19,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741834_1010 (size=8107) 2024-11-07T12:32:19,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741834_1010 (size=8107) 2024-11-07T12:32:19,212 DEBUG [RS:0;87fb68174c52:33325 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/oldWALs 2024-11-07T12:32:19,212 INFO [RS:0;87fb68174c52:33325 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87fb68174c52%2C33325%2C1730982701549.meta:.meta(num 1730982702282) 2024-11-07T12:32:19,213 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:19,213 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:19,213 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:19,213 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:19,213 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:19,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741878_1054 (size=780) 2024-11-07T12:32:19,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741878_1054 (size=780) 2024-11-07T12:32:19,217 DEBUG [RS:0;87fb68174c52:33325 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/oldWALs 2024-11-07T12:32:19,217 INFO [RS:0;87fb68174c52:33325 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87fb68174c52%2C33325%2C1730982701549:(num 1730982738988) 2024-11-07T12:32:19,217 DEBUG [RS:0;87fb68174c52:33325 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:32:19,217 INFO [RS:0;87fb68174c52:33325 {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:32:19,217 INFO [RS:0;87fb68174c52:33325 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:32:19,217 INFO [RS:0;87fb68174c52:33325 {}] hbase.ChoreService(370): Chore service for: regionserver/87fb68174c52:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-07T12:32:19,217 INFO [RS:0;87fb68174c52:33325 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:32:19,217 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-07T12:32:19,217 INFO [RS:0;87fb68174c52:33325 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33325 2024-11-07T12:32:19,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:32:19,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33325-0x1001a39f4470001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/87fb68174c52,33325,1730982701549 2024-11-07T12:32:19,219 INFO [RS:0;87fb68174c52:33325 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:32:19,220 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [87fb68174c52,33325,1730982701549] 2024-11-07T12:32:19,221 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/87fb68174c52,33325,1730982701549 already deleted, retry=false 2024-11-07T12:32:19,221 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 87fb68174c52,33325,1730982701549 expired; onlineServers=0 2024-11-07T12:32:19,221 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '87fb68174c52,37673,1730982701497' ***** 2024-11-07T12:32:19,222 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-07T12:32:19,222 INFO [M:0;87fb68174c52:37673 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:32:19,222 INFO [M:0;87fb68174c52:37673 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:32:19,222 DEBUG [M:0;87fb68174c52:37673 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-07T12:32:19,222 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-07T12:32:19,222 DEBUG [M:0;87fb68174c52:37673 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-07T12:32:19,222 DEBUG [master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.small.0-1730982701684 {}] cleaner.HFileCleaner(306): Exit Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.small.0-1730982701684,5,FailOnTimeoutGroup] 2024-11-07T12:32:19,222 DEBUG [master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.large.0-1730982701684 {}] cleaner.HFileCleaner(306): Exit Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.large.0-1730982701684,5,FailOnTimeoutGroup] 2024-11-07T12:32:19,222 INFO [M:0;87fb68174c52:37673 {}] hbase.ChoreService(370): Chore service for: master/87fb68174c52:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-07T12:32:19,222 INFO [M:0;87fb68174c52:37673 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:32:19,222 DEBUG [M:0;87fb68174c52:37673 {}] master.HMaster(1795): Stopping service threads 2024-11-07T12:32:19,222 INFO [M:0;87fb68174c52:37673 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-07T12:32:19,222 INFO [M:0;87fb68174c52:37673 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:32:19,222 INFO [M:0;87fb68174c52:37673 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-07T12:32:19,223 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-07T12:32:19,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-07T12:32:19,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:32:19,224 DEBUG [M:0;87fb68174c52:37673 {}] zookeeper.ZKUtil(347): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-07T12:32:19,224 WARN [M:0;87fb68174c52:37673 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-07T12:32:19,224 INFO [M:0;87fb68174c52:37673 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/.lastflushedseqids 2024-11-07T12:32:19,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741879_1055 (size=228) 2024-11-07T12:32:19,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741879_1055 (size=228) 2024-11-07T12:32:19,229 INFO [M:0;87fb68174c52:37673 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-07T12:32:19,230 INFO [M:0;87fb68174c52:37673 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-07T12:32:19,230 DEBUG [M:0;87fb68174c52:37673 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:32:19,230 INFO [M:0;87fb68174c52:37673 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:32:19,230 DEBUG [M:0;87fb68174c52:37673 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:32:19,230 DEBUG [M:0;87fb68174c52:37673 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:32:19,230 DEBUG [M:0;87fb68174c52:37673 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:32:19,230 INFO [M:0;87fb68174c52:37673 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=53.70 KB heapSize=65.92 KB 2024-11-07T12:32:19,252 DEBUG [M:0;87fb68174c52:37673 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/259238b146774f67b7b31bd25b5d5636 is 82, key is hbase:meta,,1/info:regioninfo/1730982702319/Put/seqid=0 2024-11-07T12:32:19,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741880_1056 (size=5672) 2024-11-07T12:32:19,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741880_1056 (size=5672) 2024-11-07T12:32:19,256 INFO [M:0;87fb68174c52:37673 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/259238b146774f67b7b31bd25b5d5636 2024-11-07T12:32:19,280 DEBUG [M:0;87fb68174c52:37673 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f74ccd9a006e41d48eba9222a1162d98 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1730982702740/Put/seqid=0 2024-11-07T12:32:19,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741881_1057 (size=7680) 2024-11-07T12:32:19,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:19,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741881_1057 (size=7680) 2024-11-07T12:32:19,290 INFO [M:0;87fb68174c52:37673 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.09 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f74ccd9a006e41d48eba9222a1162d98 2024-11-07T12:32:19,295 INFO [M:0;87fb68174c52:37673 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f74ccd9a006e41d48eba9222a1162d98 2024-11-07T12:32:19,308 DEBUG [M:0;87fb68174c52:37673 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6413a0edc9ea4be38fabc900158aa12d is 69, key is 87fb68174c52,33325,1730982701549/rs:state/1730982701784/Put/seqid=0 2024-11-07T12:32:19,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741882_1058 (size=5156) 2024-11-07T12:32:19,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741882_1058 (size=5156) 2024-11-07T12:32:19,313 INFO [M:0;87fb68174c52:37673 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6413a0edc9ea4be38fabc900158aa12d 2024-11-07T12:32:19,320 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33325-0x1001a39f4470001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:32:19,320 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33325-0x1001a39f4470001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:32:19,320 INFO [RS:0;87fb68174c52:33325 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:32:19,321 INFO [RS:0;87fb68174c52:33325 {}] regionserver.HRegionServer(1031): Exiting; stopping=87fb68174c52,33325,1730982701549; zookeeper connection closed. 2024-11-07T12:32:19,321 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@60ee7d80 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@60ee7d80 2024-11-07T12:32:19,321 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-07T12:32:19,331 DEBUG [M:0;87fb68174c52:37673 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/027da4c289364d7eb46d5f36a9e01d80 is 52, key is load_balancer_on/state:d/1730982702373/Put/seqid=0 2024-11-07T12:32:19,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741883_1059 (size=5056) 2024-11-07T12:32:19,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741883_1059 (size=5056) 2024-11-07T12:32:19,336 INFO [M:0;87fb68174c52:37673 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/027da4c289364d7eb46d5f36a9e01d80 2024-11-07T12:32:19,341 DEBUG [M:0;87fb68174c52:37673 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/259238b146774f67b7b31bd25b5d5636 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/259238b146774f67b7b31bd25b5d5636 2024-11-07T12:32:19,345 INFO [M:0;87fb68174c52:37673 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/259238b146774f67b7b31bd25b5d5636, entries=8, sequenceid=129, filesize=5.5 K 2024-11-07T12:32:19,345 DEBUG [M:0;87fb68174c52:37673 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f74ccd9a006e41d48eba9222a1162d98 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f74ccd9a006e41d48eba9222a1162d98 2024-11-07T12:32:19,349 INFO [M:0;87fb68174c52:37673 {}] regionserver.StoreFileReader(518): Loaded Delete 
Family Bloom (CompoundBloomFilter) metadata for f74ccd9a006e41d48eba9222a1162d98 2024-11-07T12:32:19,349 INFO [M:0;87fb68174c52:37673 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f74ccd9a006e41d48eba9222a1162d98, entries=14, sequenceid=129, filesize=7.5 K 2024-11-07T12:32:19,350 DEBUG [M:0;87fb68174c52:37673 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6413a0edc9ea4be38fabc900158aa12d as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6413a0edc9ea4be38fabc900158aa12d 2024-11-07T12:32:19,354 INFO [M:0;87fb68174c52:37673 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6413a0edc9ea4be38fabc900158aa12d, entries=1, sequenceid=129, filesize=5.0 K 2024-11-07T12:32:19,355 DEBUG [M:0;87fb68174c52:37673 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/027da4c289364d7eb46d5f36a9e01d80 as hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/027da4c289364d7eb46d5f36a9e01d80 2024-11-07T12:32:19,359 INFO [M:0;87fb68174c52:37673 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35145/user/jenkins/test-data/23f517f4-e795-0e24-5579-bf8ddd7a528a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/027da4c289364d7eb46d5f36a9e01d80, entries=1, sequenceid=129, filesize=4.9 K 2024-11-07T12:32:19,360 INFO [M:0;87fb68174c52:37673 {}] regionserver.HRegion(3140): Finished flush of dataSize ~53.70 KB/54985, heapSize ~65.86 KB/67440, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 130ms, sequenceid=129, compaction requested=false 2024-11-07T12:32:19,361 INFO [M:0;87fb68174c52:37673 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:32:19,361 DEBUG [M:0;87fb68174c52:37673 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730982739230Disabling compacts and flushes for region at 1730982739230Disabling writes for close at 1730982739230Obtaining lock to block concurrent updates at 1730982739230Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1730982739230Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=54985, getHeapSize=67440, getOffHeapSize=0, getCellsCount=152 at 1730982739230Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1730982739231 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1730982739231Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1730982739251 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1730982739251Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1730982739261 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1730982739279 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1730982739280 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1730982739295 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1730982739308 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1730982739308Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1730982739317 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1730982739331 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1730982739331Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ae74e3e: reopening flushed file at 1730982739340 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56d8eae1: reopening flushed file at 1730982739345 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@117a66b2: reopening flushed file at 1730982739349 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1cadd1ec: reopening flushed file at 1730982739354 (+5 ms)Finished flush of dataSize ~53.70 KB/54985, heapSize ~65.86 KB/67440, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 130ms, sequenceid=129, compaction requested=false at 1730982739360 (+6 ms)Writing region close event to WAL at 1730982739361 (+1 ms)Closed at 1730982739361 2024-11-07T12:32:19,362 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:19,362 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:19,362 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:19,362 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:19,362 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:19,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45435 is added to blk_1073741830_1006 (size=63915) 2024-11-07T12:32:19,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41829 is added to blk_1073741830_1006 (size=63915) 2024-11-07T12:32:19,766 INFO [M:0;87fb68174c52:37673 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-07T12:32:19,766 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-07T12:32:19,766 INFO [M:0;87fb68174c52:37673 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:37673 2024-11-07T12:32:19,766 INFO [M:0;87fb68174c52:37673 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:32:19,796 INFO [regionserver/87fb68174c52:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:32:19,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:32:19,868 INFO [M:0;87fb68174c52:37673 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:32:19,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37673-0x1001a39f4470000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:32:19,870 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6f53b1c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:32:19,871 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@550f8afc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:32:19,871 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:32:19,871 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@79ecb530{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:32:19,871 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@34accf12{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/hadoop.log.dir/,STOPPED} 2024-11-07T12:32:19,873 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-07T12:32:19,873 WARN [BP-1980521643-172.17.0.3-1730982700847 heartbeating to localhost/127.0.0.1:35145 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:32:19,873 WARN [BP-1980521643-172.17.0.3-1730982700847 heartbeating to localhost/127.0.0.1:35145 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1980521643-172.17.0.3-1730982700847 (Datanode Uuid b243b6f2-cfdc-4c7b-a362-fde95abf6bd2) service to localhost/127.0.0.1:35145 2024-11-07T12:32:19,873 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:32:19,873 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/cluster_ffcb906b-300d-d03c-c0f8-691bbb129366/data/data3/current/BP-1980521643-172.17.0.3-1730982700847 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:32:19,873 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/cluster_ffcb906b-300d-d03c-c0f8-691bbb129366/data/data4/current/BP-1980521643-172.17.0.3-1730982700847 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:32:19,874 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:32:19,875 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@314e7370{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:32:19,876 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@45bda0cb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:32:19,876 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:32:19,876 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75b3fca0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:32:19,876 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f8818bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/hadoop.log.dir/,STOPPED} 2024-11-07T12:32:19,878 WARN [BP-1980521643-172.17.0.3-1730982700847 heartbeating to localhost/127.0.0.1:35145 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:32:19,878 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-07T12:32:19,878 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:32:19,878 WARN [BP-1980521643-172.17.0.3-1730982700847 heartbeating to localhost/127.0.0.1:35145 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1980521643-172.17.0.3-1730982700847 (Datanode Uuid b421e928-a962-4517-9bda-a783f78d935c) service to localhost/127.0.0.1:35145 2024-11-07T12:32:19,878 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/cluster_ffcb906b-300d-d03c-c0f8-691bbb129366/data/data1/current/BP-1980521643-172.17.0.3-1730982700847 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:32:19,878 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/cluster_ffcb906b-300d-d03c-c0f8-691bbb129366/data/data2/current/BP-1980521643-172.17.0.3-1730982700847 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:32:19,879 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:32:19,884 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2dd2b381{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:32:19,885 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@69826858{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:32:19,885 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:32:19,885 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b7f954{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:32:19,885 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c5b7d7a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/hadoop.log.dir/,STOPPED} 2024-11-07T12:32:19,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:19,892 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-07T12:32:19,918 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-07T12:32:19,926 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=230 (was 206) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35145 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35145 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35145 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35145 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35145 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35145 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:35145 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35145 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=515 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=30 (was 29) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6348 (was 6381) 2024-11-07T12:32:19,934 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=230, OpenFileDescriptor=515, MaxFileDescriptor=1048576, SystemLoadAverage=30, ProcessCount=11, AvailableMemoryMB=6348 2024-11-07T12:32:19,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-07T12:32:19,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/hadoop.log.dir so I do NOT create it in target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3 2024-11-07T12:32:19,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/92c0cee9-da64-1e7f-47cc-67451f5ab76f/hadoop.tmp.dir so I do NOT create it in target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3 2024-11-07T12:32:19,935 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/cluster_08ec72f8-9879-6904-f4c0-48d0cf367fe5, deleteOnExit=true 2024-11-07T12:32:19,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-07T12:32:19,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/test.cache.data in system properties and HBase conf 2024-11-07T12:32:19,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/hadoop.tmp.dir in system properties and HBase conf 
2024-11-07T12:32:19,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/hadoop.log.dir in system properties and HBase conf 2024-11-07T12:32:19,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-07T12:32:19,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-07T12:32:19,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-07T12:32:19,935 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-07T12:32:19,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:32:19,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:32:19,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-07T12:32:19,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:32:19,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-07T12:32:19,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-07T12:32:19,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 
2024-11-07T12:32:19,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:32:19,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-07T12:32:19,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/nfs.dump.dir in system properties and HBase conf 2024-11-07T12:32:19,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/java.io.tmpdir in system properties and HBase conf 2024-11-07T12:32:19,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:32:19,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-07T12:32:19,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-07T12:32:19,949 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:32:19,999 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:32:20,002 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:32:20,005 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:32:20,006 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:32:20,006 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:32:20,006 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:32:20,007 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1e07b1d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:32:20,007 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a024a47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:32:20,077 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:32:20,120 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5be97557{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/java.io.tmpdir/jetty-localhost-37623-hadoop-hdfs-3_4_1-tests_jar-_-any-5439779490859463068/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:32:20,121 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6ce66601{HTTP/1.1, (http/1.1)}{localhost:37623} 2024-11-07T12:32:20,121 INFO [Time-limited test {}] server.Server(415): Started @278793ms 2024-11-07T12:32:20,133 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:32:20,178 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:32:20,181 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:32:20,181 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:32:20,181 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:32:20,181 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:32:20,182 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b01e479{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:32:20,182 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20042785{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:32:20,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:20,296 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@265f6a26{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/java.io.tmpdir/jetty-localhost-33243-hadoop-hdfs-3_4_1-tests_jar-_-any-181606788822376017/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:32:20,296 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7d6e4d22{HTTP/1.1, (http/1.1)}{localhost:33243} 2024-11-07T12:32:20,296 INFO [Time-limited test {}] server.Server(415): Started @278969ms 2024-11-07T12:32:20,297 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:32:20,327 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:32:20,329 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:32:20,330 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:32:20,330 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:32:20,330 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:32:20,330 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@9268180{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:32:20,330 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e818af2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:32:20,375 WARN [Thread-2477 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/cluster_08ec72f8-9879-6904-f4c0-48d0cf367fe5/data/data1/current/BP-2037743226-172.17.0.3-1730982739954/current, will proceed with Du for space computation calculation, 2024-11-07T12:32:20,375 WARN [Thread-2478 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/cluster_08ec72f8-9879-6904-f4c0-48d0cf367fe5/data/data2/current/BP-2037743226-172.17.0.3-1730982739954/current, will proceed with Du for space computation calculation, 2024-11-07T12:32:20,391 WARN [Thread-2456 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:32:20,394 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8e42eb545183e87b with lease ID 0x34527c95c289a6d0: Processing first storage report for DS-04b9141d-5dc3-4940-aa23-159235a0b3de from datanode DatanodeRegistration(127.0.0.1:35887, datanodeUuid=1812218f-8319-47d0-8a39-c4601c9d059f, infoPort=38159, infoSecurePort=0, ipcPort=34035, storageInfo=lv=-57;cid=testClusterID;nsid=655849081;c=1730982739954) 2024-11-07T12:32:20,394 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8e42eb545183e87b with lease ID 0x34527c95c289a6d0: from storage DS-04b9141d-5dc3-4940-aa23-159235a0b3de node DatanodeRegistration(127.0.0.1:35887, datanodeUuid=1812218f-8319-47d0-8a39-c4601c9d059f, infoPort=38159, infoSecurePort=0, ipcPort=34035, storageInfo=lv=-57;cid=testClusterID;nsid=655849081;c=1730982739954), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:32:20,394 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8e42eb545183e87b with lease ID 0x34527c95c289a6d0: Processing first storage report for DS-92383ead-4a35-4320-8d68-3d65f715749a from datanode DatanodeRegistration(127.0.0.1:35887, datanodeUuid=1812218f-8319-47d0-8a39-c4601c9d059f, infoPort=38159, infoSecurePort=0, ipcPort=34035, storageInfo=lv=-57;cid=testClusterID;nsid=655849081;c=1730982739954) 2024-11-07T12:32:20,394 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8e42eb545183e87b with lease ID 0x34527c95c289a6d0: from storage DS-92383ead-4a35-4320-8d68-3d65f715749a node DatanodeRegistration(127.0.0.1:35887, datanodeUuid=1812218f-8319-47d0-8a39-c4601c9d059f, infoPort=38159, infoSecurePort=0, ipcPort=34035, storageInfo=lv=-57;cid=testClusterID;nsid=655849081;c=1730982739954), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:32:20,450 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@45235d1a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/java.io.tmpdir/jetty-localhost-42589-hadoop-hdfs-3_4_1-tests_jar-_-any-4098461643813151214/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:32:20,451 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2527c84a{HTTP/1.1, (http/1.1)}{localhost:42589} 2024-11-07T12:32:20,451 INFO [Time-limited test {}] server.Server(415): Started @279123ms 2024-11-07T12:32:20,452 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
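The "Failed invocation" warnings above come from RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed through reflection while the test's DFS client has already been shut down, which is why the root cause in both stack traces is "java.io.IOException: Filesystem closed". A minimal sketch of that reflective probe follows; the helper class and its null-on-failure fallback are illustrative assumptions, not HBase's actual implementation.

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch: call FileSystem#isFileClosed reflectively so the code
// also works against FileSystem implementations that lack the method.
// Returns null when the answer is unknown (method missing or the call threw),
// e.g. when the underlying DFSClient is already closed, as in the log above.
final class IsFileClosedProbe {

  static Boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return null; // this FileSystem has no isFileClosed at all
    } catch (IllegalAccessException | InvocationTargetException e) {
      return null; // invocation failed, e.g. "Filesystem closed"
    }
  }

  private IsFileClosedProbe() {}
}
```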
2024-11-07T12:32:20,526 WARN [Thread-2503 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/cluster_08ec72f8-9879-6904-f4c0-48d0cf367fe5/data/data3/current/BP-2037743226-172.17.0.3-1730982739954/current, will proceed with Du for space computation calculation, 2024-11-07T12:32:20,526 WARN [Thread-2504 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/cluster_08ec72f8-9879-6904-f4c0-48d0cf367fe5/data/data4/current/BP-2037743226-172.17.0.3-1730982739954/current, will proceed with Du for space computation calculation, 2024-11-07T12:32:20,542 WARN [Thread-2492 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-07T12:32:20,544 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x13898f3de905eb7 with lease ID 0x34527c95c289a6d1: Processing first storage report for DS-4ee01838-9cbc-4ff1-b5f3-b99e423348fe from datanode DatanodeRegistration(127.0.0.1:34361, datanodeUuid=b87e22c1-6edb-4956-999f-0258120d25d9, infoPort=46097, infoSecurePort=0, ipcPort=36853, storageInfo=lv=-57;cid=testClusterID;nsid=655849081;c=1730982739954) 2024-11-07T12:32:20,544 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x13898f3de905eb7 with lease ID 0x34527c95c289a6d1: from storage DS-4ee01838-9cbc-4ff1-b5f3-b99e423348fe node DatanodeRegistration(127.0.0.1:34361, datanodeUuid=b87e22c1-6edb-4956-999f-0258120d25d9, infoPort=46097, infoSecurePort=0, ipcPort=36853, storageInfo=lv=-57;cid=testClusterID;nsid=655849081;c=1730982739954), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:32:20,544 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x13898f3de905eb7 with lease ID 0x34527c95c289a6d1: Processing first storage report for DS-71f4e754-7bcc-4e52-8d27-6dca8feb8d4b from datanode DatanodeRegistration(127.0.0.1:34361, datanodeUuid=b87e22c1-6edb-4956-999f-0258120d25d9, infoPort=46097, infoSecurePort=0, ipcPort=36853, storageInfo=lv=-57;cid=testClusterID;nsid=655849081;c=1730982739954) 2024-11-07T12:32:20,544 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x13898f3de905eb7 with lease ID 0x34527c95c289a6d1: from storage DS-71f4e754-7bcc-4e52-8d27-6dca8feb8d4b node DatanodeRegistration(127.0.0.1:34361, datanodeUuid=b87e22c1-6edb-4956-999f-0258120d25d9, infoPort=46097, infoSecurePort=0, ipcPort=36853, storageInfo=lv=-57;cid=testClusterID;nsid=655849081;c=1730982739954), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:32:20,573 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3 2024-11-07T12:32:20,575 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/cluster_08ec72f8-9879-6904-f4c0-48d0cf367fe5/zookeeper_0, clientPort=54028, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/cluster_08ec72f8-9879-6904-f4c0-48d0cf367fe5/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/cluster_08ec72f8-9879-6904-f4c0-48d0cf367fe5/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-07T12:32:20,576 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54028 2024-11-07T12:32:20,576 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:32:20,577 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:32:20,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:32:20,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:32:20,588 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c with version=8 2024-11-07T12:32:20,588 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35819/user/jenkins/test-data/fa703b80-7eb4-efd7-f389-d6ed41f06865/hbase-staging 2024-11-07T12:32:20,590 INFO [Time-limited test {}] client.ConnectionUtils(128): master/87fb68174c52:0 server-side Connection retries=45 2024-11-07T12:32:20,590 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:32:20,590 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:32:20,590 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:32:20,590 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:32:20,590 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:32:20,590 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-07T12:32:20,590 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:32:20,591 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34287 2024-11-07T12:32:20,592 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34287 connecting to ZooKeeper ensemble=127.0.0.1:54028 2024-11-07T12:32:20,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:342870x0, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:32:20,596 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34287-0x1001a3a8cfd0000 connected 2024-11-07T12:32:20,610 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:32:20,611 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:32:20,613 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:32:20,613 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c, hbase.cluster.distributed=false 2024-11-07T12:32:20,615 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:32:20,615 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34287 2024-11-07T12:32:20,615 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34287 2024-11-07T12:32:20,615 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34287 2024-11-07T12:32:20,616 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34287 2024-11-07T12:32:20,616 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34287 2024-11-07T12:32:20,630 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/87fb68174c52:0 server-side Connection retries=45 2024-11-07T12:32:20,630 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:32:20,630 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:32:20,630 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:32:20,630 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:32:20,630 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:32:20,630 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T12:32:20,631 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:32:20,631 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38689 2024-11-07T12:32:20,632 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38689 connecting to ZooKeeper ensemble=127.0.0.1:54028 2024-11-07T12:32:20,632 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:32:20,634 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:32:20,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:386890x0, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:32:20,637 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38689-0x1001a3a8cfd0001 connected 2024-11-07T12:32:20,637 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38689-0x1001a3a8cfd0001, quorum=127.0.0.1:54028, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:32:20,638 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-07T12:32:20,638 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-07T12:32:20,639 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38689-0x1001a3a8cfd0001, quorum=127.0.0.1:54028, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T12:32:20,639 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38689-0x1001a3a8cfd0001, quorum=127.0.0.1:54028, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:32:20,640 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38689 2024-11-07T12:32:20,640 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38689 2024-11-07T12:32:20,640 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38689 2024-11-07T12:32:20,640 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38689 2024-11-07T12:32:20,641 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38689 
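The RpcExecutor lines above show the region server coming up with handlerCount=3 and small call queues, values a test harness typically dials down from the defaults. A hedged sketch of the corresponding configuration key follows; the key itself is the standard one, but setting it to 3 here is only an assumption about how such a test value would be supplied.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: the standard handler-count key behind the "handlerCount=3" figures
// in the RpcExecutor lines above. The per-queue splits (read/write/scan) are
// derived by HBase from its own defaults and are not set explicitly here.
public class RpcHandlerConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.regionserver.handler.count", 3);
    System.out.println(conf.getInt("hbase.regionserver.handler.count", 30));
  }
}
```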
2024-11-07T12:32:20,651 DEBUG [M:0;87fb68174c52:34287 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;87fb68174c52:34287 2024-11-07T12:32:20,652 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/87fb68174c52,34287,1730982740589 2024-11-07T12:32:20,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:32:20,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1001a3a8cfd0001, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:32:20,654 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/87fb68174c52,34287,1730982740589 2024-11-07T12:32:20,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1001a3a8cfd0001, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T12:32:20,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:32:20,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1001a3a8cfd0001, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:32:20,655 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T12:32:20,656 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/87fb68174c52,34287,1730982740589 from backup master directory 2024-11-07T12:32:20,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/87fb68174c52,34287,1730982740589 2024-11-07T12:32:20,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:32:20,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1001a3a8cfd0001, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:32:20,657 WARN [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
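The event sequence above is the master election dance: the process registers under /hbase/backup-masters, takes /hbase/master, then deletes its backup entry. A simplified sketch of the same ephemeral-znode pattern with the plain ZooKeeper client follows; it is not HBase's ActiveMasterManager, and the server name, session timeout and znode payload are placeholders.

```java
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

// Simplified sketch of the election pattern implied by the events above:
// try to create the ephemeral /hbase/master znode; the winner is the active
// master, everyone else stays a backup and watches the node for deletion.
public class MasterZNodeSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:54028", 30000, event -> { });
    byte[] serverName = "example-host,34287,1730982740589".getBytes("UTF-8");
    try {
      zk.create("/hbase/master", serverName,
          ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      System.out.println("registered as active master");
    } catch (KeeperException.NodeExistsException e) {
      zk.exists("/hbase/master", true); // watch for the active master going away
      System.out.println("staying backup master");
    }
  }
}
```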
2024-11-07T12:32:20,657 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=87fb68174c52,34287,1730982740589 2024-11-07T12:32:20,660 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/hbase.id] with ID: aff98e26-f8fa-4a15-8b2c-db73d354eaf5 2024-11-07T12:32:20,660 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/.tmp/hbase.id 2024-11-07T12:32:20,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:32:20,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:32:20,666 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/.tmp/hbase.id]:[hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/hbase.id] 2024-11-07T12:32:20,676 INFO [master/87fb68174c52:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:32:20,676 INFO [master/87fb68174c52:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-07T12:32:20,677 INFO [master/87fb68174c52:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
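The two FSUtils lines above describe how hbase.id is produced: the cluster ID is written to a temporary file and then moved into place, so readers never observe a half-written file. A minimal sketch of that write-then-rename idiom with the Hadoop FileSystem API follows, with placeholder paths and a random UUID standing in for the real cluster ID.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the "write to a temporary location, then rename" idiom used for
// the cluster ID file above: the rename is what makes the publish atomic.
public class WriteThenRenameExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id"); // placeholder
    Path dst = new Path("/user/jenkins/test-data/hbase.id");      // placeholder
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, dst)) {
      throw new IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}
```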
2024-11-07T12:32:20,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:32:20,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1001a3a8cfd0001, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:32:20,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:32:20,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:32:20,687 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T12:32:20,687 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-07T12:32:20,688 INFO [master/87fb68174c52:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:32:20,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:32:20,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:32:20,697 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store 2024-11-07T12:32:20,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:32:20,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:32:20,703 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:32:20,703 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:32:20,703 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:32:20,703 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:32:20,703 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:32:20,703 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:32:20,703 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
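The descriptor dumped above defines the local 'master:store' region with four column families, each with its own versions, block size, bloom filter, in-memory and encoding settings. For orientation, here is a hedged sketch of how an equivalent descriptor would be assembled with the public client builders; it reproduces only the 'info' and 'proc' families from the log and is not the code path the master itself uses.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: two of the column families from the 'master:store' descriptor
// above, rebuilt with the public client API for readability.
public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8 * 1024)                               // 8192 B (8KB)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .build())
        .build();
  }
}
```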
2024-11-07T12:32:20,703 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730982740703Disabling compacts and flushes for region at 1730982740703Disabling writes for close at 1730982740703Writing region close event to WAL at 1730982740703Closed at 1730982740703 2024-11-07T12:32:20,704 WARN [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store/.initializing 2024-11-07T12:32:20,704 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/WALs/87fb68174c52,34287,1730982740589 2024-11-07T12:32:20,706 INFO [master/87fb68174c52:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87fb68174c52%2C34287%2C1730982740589, suffix=, logDir=hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/WALs/87fb68174c52,34287,1730982740589, archiveDir=hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/oldWALs, maxLogs=10 2024-11-07T12:32:20,706 INFO [master/87fb68174c52:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C34287%2C1730982740589.1730982740706 2024-11-07T12:32:20,710 INFO [master/87fb68174c52:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/WALs/87fb68174c52,34287,1730982740589/87fb68174c52%2C34287%2C1730982740589.1730982740706 2024-11-07T12:32:20,711 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46097:46097),(127.0.0.1/127.0.0.1:38159:38159)] 2024-11-07T12:32:20,715 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:32:20,715 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:32:20,715 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:32:20,715 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:32:20,716 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:32:20,718 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-07T12:32:20,718 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:32:20,718 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:32:20,718 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:32:20,719 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-07T12:32:20,720 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:32:20,720 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:32:20,720 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:32:20,721 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-07T12:32:20,721 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:32:20,721 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:32:20,721 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:32:20,722 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-07T12:32:20,722 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:32:20,723 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:32:20,723 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:32:20,723 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:32:20,723 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:32:20,725 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:32:20,725 DEBUG [master/87fb68174c52:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:32:20,725 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-07T12:32:20,726 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:32:20,728 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:32:20,728 INFO [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=878763, jitterRate=0.11740493774414062}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-07T12:32:20,729 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1730982740715Initializing all the Stores at 1730982740716 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982740716Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982740716Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982740716Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982740716Cleaning up temporary data from old regions at 1730982740725 (+9 ms)Region opened successfully at 1730982740728 (+3 ms) 2024-11-07T12:32:20,729 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-07T12:32:20,731 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@488b13c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87fb68174c52/172.17.0.3:0 2024-11-07T12:32:20,732 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-07T12:32:20,732 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-07T12:32:20,732 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-07T12:32:20,732 INFO [master/87fb68174c52:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-07T12:32:20,733 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-07T12:32:20,733 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-07T12:32:20,733 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-07T12:32:20,735 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-07T12:32:20,735 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-07T12:32:20,737 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-07T12:32:20,737 INFO [master/87fb68174c52:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-07T12:32:20,737 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-07T12:32:20,738 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-07T12:32:20,739 INFO [master/87fb68174c52:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-07T12:32:20,739 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-07T12:32:20,741 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-07T12:32:20,742 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-07T12:32:20,743 DEBUG 
[master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-07T12:32:20,745 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-07T12:32:20,746 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-07T12:32:20,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:32:20,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1001a3a8cfd0001, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:32:20,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:32:20,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1001a3a8cfd0001, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:32:20,748 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=87fb68174c52,34287,1730982740589, sessionid=0x1001a3a8cfd0000, setting cluster-up flag (Was=false) 2024-11-07T12:32:20,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:32:20,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1001a3a8cfd0001, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:32:20,755 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-07T12:32:20,756 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87fb68174c52,34287,1730982740589 2024-11-07T12:32:20,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:32:20,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1001a3a8cfd0001, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:32:20,763 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-07T12:32:20,764 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87fb68174c52,34287,1730982740589 2024-11-07T12:32:20,765 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-07T12:32:20,767 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-07T12:32:20,767 INFO [master/87fb68174c52:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-07T12:32:20,767 INFO [master/87fb68174c52:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-07T12:32:20,767 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 87fb68174c52,34287,1730982740589 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-07T12:32:20,768 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:32:20,768 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:32:20,769 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:32:20,769 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/87fb68174c52:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:32:20,769 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/87fb68174c52:0, corePoolSize=10, maxPoolSize=10 2024-11-07T12:32:20,769 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:32:20,769 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/87fb68174c52:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:32:20,769 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/87fb68174c52:0, corePoolSize=1, 
maxPoolSize=1 2024-11-07T12:32:20,773 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1730982770773 2024-11-07T12:32:20,773 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-07T12:32:20,774 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-07T12:32:20,774 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-07T12:32:20,774 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:32:20,774 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-07T12:32:20,774 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-07T12:32:20,774 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-07T12:32:20,774 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-07T12:32:20,774 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T12:32:20,774 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-07T12:32:20,774 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-07T12:32:20,774 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-07T12:32:20,775 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-07T12:32:20,775 INFO [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-07T12:32:20,775 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.large.0-1730982740775,5,FailOnTimeoutGroup] 2024-11-07T12:32:20,775 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:32:20,775 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.small.0-1730982740775,5,FailOnTimeoutGroup] 2024-11-07T12:32:20,775 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-07T12:32:20,775 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-07T12:32:20,775 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-07T12:32:20,775 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-07T12:32:20,775 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-07T12:32:20,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:32:20,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:32:20,787 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-07T12:32:20,787 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c 2024-11-07T12:32:20,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741832_1008 (size=32) 2024-11-07T12:32:20,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741832_1008 (size=32) 2024-11-07T12:32:20,798 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:32:20,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:32:20,802 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:32:20,802 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:32:20,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:32:20,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:32:20,803 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:32:20,803 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:32:20,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:32:20,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:32:20,804 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:32:20,805 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:32:20,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:32:20,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:32:20,806 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:32:20,806 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:32:20,806 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:32:20,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:32:20,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/data/hbase/meta/1588230740 2024-11-07T12:32:20,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/data/hbase/meta/1588230740 2024-11-07T12:32:20,808 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:32:20,808 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:32:20,809 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-07T12:32:20,810 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:32:20,812 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:32:20,812 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=749928, jitterRate=-0.046417757868766785}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:32:20,813 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1730982740798Initializing all the Stores at 1730982740799 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982740799Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982740800 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982740800Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982740800Cleaning up temporary data from old regions at 1730982740808 (+8 ms)Region opened successfully at 1730982740813 (+5 ms) 2024-11-07T12:32:20,813 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:32:20,813 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:32:20,813 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:32:20,813 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:32:20,813 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:32:20,814 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:32:20,814 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730982740813Disabling compacts and flushes for region at 1730982740813Disabling writes for close at 1730982740813Writing 
region close event to WAL at 1730982740814 (+1 ms)Closed at 1730982740814 2024-11-07T12:32:20,815 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:32:20,815 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-07T12:32:20,816 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-07T12:32:20,817 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:32:20,818 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-07T12:32:20,842 INFO [RS:0;87fb68174c52:38689 {}] regionserver.HRegionServer(746): ClusterId : aff98e26-f8fa-4a15-8b2c-db73d354eaf5 2024-11-07T12:32:20,842 DEBUG [RS:0;87fb68174c52:38689 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-07T12:32:20,845 DEBUG [RS:0;87fb68174c52:38689 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-07T12:32:20,845 DEBUG [RS:0;87fb68174c52:38689 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-07T12:32:20,846 DEBUG [RS:0;87fb68174c52:38689 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-07T12:32:20,847 DEBUG [RS:0;87fb68174c52:38689 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60ef51dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87fb68174c52/172.17.0.3:0 2024-11-07T12:32:20,858 DEBUG [RS:0;87fb68174c52:38689 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;87fb68174c52:38689 2024-11-07T12:32:20,858 INFO [RS:0;87fb68174c52:38689 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-07T12:32:20,858 INFO [RS:0;87fb68174c52:38689 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-07T12:32:20,858 DEBUG [RS:0;87fb68174c52:38689 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-07T12:32:20,859 INFO [RS:0;87fb68174c52:38689 {}] regionserver.HRegionServer(2659): reportForDuty to master=87fb68174c52,34287,1730982740589 with port=38689, startcode=1730982740630 2024-11-07T12:32:20,859 DEBUG [RS:0;87fb68174c52:38689 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-07T12:32:20,862 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58153, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-07T12:32:20,862 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34287 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 87fb68174c52,38689,1730982740630 2024-11-07T12:32:20,862 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34287 {}] master.ServerManager(517): Registering regionserver=87fb68174c52,38689,1730982740630 2024-11-07T12:32:20,863 DEBUG [RS:0;87fb68174c52:38689 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c 2024-11-07T12:32:20,863 DEBUG [RS:0;87fb68174c52:38689 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35233 2024-11-07T12:32:20,863 DEBUG [RS:0;87fb68174c52:38689 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-07T12:32:20,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:32:20,865 DEBUG [RS:0;87fb68174c52:38689 {}] zookeeper.ZKUtil(111): regionserver:38689-0x1001a3a8cfd0001, quorum=127.0.0.1:54028, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/87fb68174c52,38689,1730982740630 2024-11-07T12:32:20,865 WARN [RS:0;87fb68174c52:38689 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-07T12:32:20,865 INFO [RS:0;87fb68174c52:38689 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:32:20,865 DEBUG [RS:0;87fb68174c52:38689 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/WALs/87fb68174c52,38689,1730982740630 2024-11-07T12:32:20,866 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [87fb68174c52,38689,1730982740630] 2024-11-07T12:32:20,868 INFO [RS:0;87fb68174c52:38689 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-07T12:32:20,870 INFO [RS:0;87fb68174c52:38689 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-07T12:32:20,870 INFO [RS:0;87fb68174c52:38689 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-07T12:32:20,870 INFO [RS:0;87fb68174c52:38689 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-07T12:32:20,870 INFO [RS:0;87fb68174c52:38689 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-07T12:32:20,871 INFO [RS:0;87fb68174c52:38689 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-07T12:32:20,871 INFO [RS:0;87fb68174c52:38689 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-07T12:32:20,871 DEBUG [RS:0;87fb68174c52:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:32:20,871 DEBUG [RS:0;87fb68174c52:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:32:20,871 DEBUG [RS:0;87fb68174c52:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:32:20,871 DEBUG [RS:0;87fb68174c52:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:32:20,871 DEBUG [RS:0;87fb68174c52:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:32:20,871 DEBUG [RS:0;87fb68174c52:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/87fb68174c52:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:32:20,871 DEBUG [RS:0;87fb68174c52:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:32:20,871 DEBUG [RS:0;87fb68174c52:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:32:20,871 DEBUG [RS:0;87fb68174c52:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:32:20,871 DEBUG [RS:0;87fb68174c52:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:32:20,871 DEBUG [RS:0;87fb68174c52:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:32:20,871 DEBUG [RS:0;87fb68174c52:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/87fb68174c52:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:32:20,871 DEBUG [RS:0;87fb68174c52:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/87fb68174c52:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:32:20,871 DEBUG [RS:0;87fb68174c52:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/87fb68174c52:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:32:20,872 INFO [RS:0;87fb68174c52:38689 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-07T12:32:20,872 INFO [RS:0;87fb68174c52:38689 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:32:20,872 INFO [RS:0;87fb68174c52:38689 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:32:20,872 INFO [RS:0;87fb68174c52:38689 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-07T12:32:20,872 INFO [RS:0;87fb68174c52:38689 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-07T12:32:20,872 INFO [RS:0;87fb68174c52:38689 {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,38689,1730982740630-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:32:20,886 INFO [RS:0;87fb68174c52:38689 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-07T12:32:20,886 INFO [RS:0;87fb68174c52:38689 {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,38689,1730982740630-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:32:20,886 INFO [RS:0;87fb68174c52:38689 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:32:20,886 INFO [RS:0;87fb68174c52:38689 {}] regionserver.Replication(171): 87fb68174c52,38689,1730982740630 started 2024-11-07T12:32:20,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:20,899 INFO [RS:0;87fb68174c52:38689 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:32:20,899 INFO [RS:0;87fb68174c52:38689 {}] regionserver.HRegionServer(1482): Serving as 87fb68174c52,38689,1730982740630, RpcServer on 87fb68174c52/172.17.0.3:38689, sessionid=0x1001a3a8cfd0001 2024-11-07T12:32:20,899 DEBUG [RS:0;87fb68174c52:38689 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-07T12:32:20,899 DEBUG [RS:0;87fb68174c52:38689 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 87fb68174c52,38689,1730982740630 2024-11-07T12:32:20,899 DEBUG [RS:0;87fb68174c52:38689 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87fb68174c52,38689,1730982740630' 2024-11-07T12:32:20,899 DEBUG [RS:0;87fb68174c52:38689 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-07T12:32:20,900 DEBUG [RS:0;87fb68174c52:38689 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-07T12:32:20,900 DEBUG [RS:0;87fb68174c52:38689 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-07T12:32:20,900 DEBUG [RS:0;87fb68174c52:38689 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-07T12:32:20,900 DEBUG [RS:0;87fb68174c52:38689 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 87fb68174c52,38689,1730982740630 2024-11-07T12:32:20,900 DEBUG [RS:0;87fb68174c52:38689 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87fb68174c52,38689,1730982740630' 2024-11-07T12:32:20,901 DEBUG [RS:0;87fb68174c52:38689 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-07T12:32:20,901 DEBUG [RS:0;87fb68174c52:38689 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-07T12:32:20,901 DEBUG [RS:0;87fb68174c52:38689 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-07T12:32:20,901 INFO [RS:0;87fb68174c52:38689 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-07T12:32:20,901 INFO [RS:0;87fb68174c52:38689 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-07T12:32:20,968 WARN [87fb68174c52:34287 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-07T12:32:21,003 INFO [RS:0;87fb68174c52:38689 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87fb68174c52%2C38689%2C1730982740630, suffix=, logDir=hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/WALs/87fb68174c52,38689,1730982740630, archiveDir=hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/oldWALs, maxLogs=32 2024-11-07T12:32:21,003 INFO [RS:0;87fb68174c52:38689 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C38689%2C1730982740630.1730982741003 2024-11-07T12:32:21,008 INFO [RS:0;87fb68174c52:38689 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/WALs/87fb68174c52,38689,1730982740630/87fb68174c52%2C38689%2C1730982740630.1730982741003 2024-11-07T12:32:21,009 DEBUG [RS:0;87fb68174c52:38689 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46097:46097),(127.0.0.1/127.0.0.1:38159:38159)] 2024-11-07T12:32:21,078 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,42371,1730982565959/87fb68174c52%2C42371%2C1730982565959.1730982566180 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:32:21,218 DEBUG [87fb68174c52:34287 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-07T12:32:21,218 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=87fb68174c52,38689,1730982740630 2024-11-07T12:32:21,220 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87fb68174c52,38689,1730982740630, state=OPENING 2024-11-07T12:32:21,221 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-07T12:32:21,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:32:21,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1001a3a8cfd0001, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:32:21,222 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:32:21,222 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:32:21,222 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:32:21,223 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=87fb68174c52,38689,1730982740630}] 2024-11-07T12:32:21,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.meta.1730982565817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:32:21,375 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-07T12:32:21,377 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38177, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-07T12:32:21,380 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-07T12:32:21,380 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:32:21,381 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87fb68174c52%2C38689%2C1730982740630.meta, suffix=.meta, logDir=hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/WALs/87fb68174c52,38689,1730982740630, archiveDir=hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/oldWALs, maxLogs=32 2024-11-07T12:32:21,382 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 87fb68174c52%2C38689%2C1730982740630.meta.1730982741381.meta 2024-11-07T12:32:21,386 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/WALs/87fb68174c52,38689,1730982740630/87fb68174c52%2C38689%2C1730982740630.meta.1730982741381.meta 2024-11-07T12:32:21,387 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46097:46097),(127.0.0.1/127.0.0.1:38159:38159)] 2024-11-07T12:32:21,391 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:32:21,391 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-07T12:32:21,391 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-07T12:32:21,392 
INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-07T12:32:21,392 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-07T12:32:21,392 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:32:21,392 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-07T12:32:21,392 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-07T12:32:21,393 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:32:21,394 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:32:21,394 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:32:21,394 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:32:21,394 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:32:21,395 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:32:21,395 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:32:21,395 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:32:21,395 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:32:21,395 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:32:21,395 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:32:21,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:32:21,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:32:21,396 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:32:21,396 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:32:21,397 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:32:21,397 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:32:21,397 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/data/hbase/meta/1588230740 2024-11-07T12:32:21,398 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/data/hbase/meta/1588230740 2024-11-07T12:32:21,399 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:32:21,399 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:32:21,399 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T12:32:21,400 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:32:21,401 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=857878, jitterRate=0.09084944427013397}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:32:21,401 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-07T12:32:21,402 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1730982741392Writing region info on filesystem at 1730982741392Initializing all the Stores at 1730982741392Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982741393 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982741393Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 
'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730982741393Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730982741393Cleaning up temporary data from old regions at 1730982741399 (+6 ms)Running coprocessor post-open hooks at 1730982741401 (+2 ms)Region opened successfully at 1730982741402 (+1 ms) 2024-11-07T12:32:21,402 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1730982741375 2024-11-07T12:32:21,404 DEBUG [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-07T12:32:21,404 INFO [RS_OPEN_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-07T12:32:21,405 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=87fb68174c52,38689,1730982740630 2024-11-07T12:32:21,405 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87fb68174c52,38689,1730982740630, state=OPEN 2024-11-07T12:32:21,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:32:21,410 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1001a3a8cfd0001, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:32:21,410 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=87fb68174c52,38689,1730982740630 2024-11-07T12:32:21,410 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:32:21,410 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:32:21,412 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-07T12:32:21,412 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=87fb68174c52,38689,1730982740630 in 188 msec 2024-11-07T12:32:21,414 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-07T12:32:21,414 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 597 msec 2024-11-07T12:32:21,414 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:32:21,414 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-07T12:32:21,416 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:32:21,416 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87fb68174c52,38689,1730982740630, seqNum=-1] 2024-11-07T12:32:21,416 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:32:21,417 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43657, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:32:21,421 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 654 msec 2024-11-07T12:32:21,421 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1730982741421, completionTime=-1 2024-11-07T12:32:21,421 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-07T12:32:21,421 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-07T12:32:21,423 INFO [master/87fb68174c52:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-07T12:32:21,423 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1730982801423 2024-11-07T12:32:21,423 INFO [master/87fb68174c52:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1730982861423 2024-11-07T12:32:21,423 INFO [master/87fb68174c52:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-07T12:32:21,423 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,34287,1730982740589-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:32:21,423 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,34287,1730982740589-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:32:21,423 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,34287,1730982740589-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-07T12:32:21,423 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-87fb68174c52:34287, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:32:21,423 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-07T12:32:21,424 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-07T12:32:21,425 DEBUG [master/87fb68174c52:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-07T12:32:21,427 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.770sec 2024-11-07T12:32:21,427 INFO [master/87fb68174c52:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-07T12:32:21,427 INFO [master/87fb68174c52:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-07T12:32:21,427 INFO [master/87fb68174c52:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-07T12:32:21,427 INFO [master/87fb68174c52:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-07T12:32:21,427 INFO [master/87fb68174c52:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-07T12:32:21,427 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,34287,1730982740589-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:32:21,427 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,34287,1730982740589-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-07T12:32:21,429 DEBUG [master/87fb68174c52:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-07T12:32:21,429 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-07T12:32:21,429 INFO [master/87fb68174c52:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87fb68174c52,34287,1730982740589-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-07T12:32:21,442 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59a6b271, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:32:21,442 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 87fb68174c52,34287,-1 for getting cluster id 2024-11-07T12:32:21,442 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-07T12:32:21,444 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'aff98e26-f8fa-4a15-8b2c-db73d354eaf5' 2024-11-07T12:32:21,444 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-07T12:32:21,444 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "aff98e26-f8fa-4a15-8b2c-db73d354eaf5" 2024-11-07T12:32:21,444 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c7f938e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:32:21,444 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [87fb68174c52,34287,-1] 2024-11-07T12:32:21,444 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-07T12:32:21,444 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:32:21,445 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56708, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-07T12:32:21,446 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3396cecd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:32:21,446 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:32:21,447 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87fb68174c52,38689,1730982740630, seqNum=-1] 2024-11-07T12:32:21,447 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:32:21,448 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41834, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:32:21,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=87fb68174c52,34287,1730982740589 2024-11-07T12:32:21,450 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:32:21,452 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-07T12:32:21,452 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:32:21,453 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/WALs/test.com,8080,1, archiveDir=hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/oldWALs, maxLogs=32 2024-11-07T12:32:21,454 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1730982741454 2024-11-07T12:32:21,458 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/WALs/test.com,8080,1/test.com%2C8080%2C1.1730982741454 2024-11-07T12:32:21,459 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46097:46097),(127.0.0.1/127.0.0.1:38159:38159)] 2024-11-07T12:32:21,459 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1730982741459 2024-11-07T12:32:21,465 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:21,465 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:21,465 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:21,465 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:21,465 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:21,465 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/WALs/test.com,8080,1/test.com%2C8080%2C1.1730982741454 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/WALs/test.com,8080,1/test.com%2C8080%2C1.1730982741459 2024-11-07T12:32:21,466 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46097:46097),(127.0.0.1/127.0.0.1:38159:38159)] 2024-11-07T12:32:21,466 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/WALs/test.com,8080,1/test.com%2C8080%2C1.1730982741454 is not closed yet, will try archiving it next time 2024-11-07T12:32:21,467 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:21,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741835_1011 (size=93) 2024-11-07T12:32:21,467 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:21,467 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:21,467 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:21,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741835_1011 (size=93) 2024-11-07T12:32:21,467 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:21,468 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/WALs/test.com,8080,1/test.com%2C8080%2C1.1730982741454 to hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/oldWALs/test.com%2C8080%2C1.1730982741454 2024-11-07T12:32:21,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741836_1012 (size=93) 2024-11-07T12:32:21,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741836_1012 (size=93) 2024-11-07T12:32:21,471 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/oldWALs 2024-11-07T12:32:21,471 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1730982741459) 2024-11-07T12:32:21,471 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-07T12:32:21,471 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-07T12:32:21,471 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:32:21,471 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:32:21,471 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:32:21,471 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-07T12:32:21,471 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-07T12:32:21,471 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1540545152, stopped=false 2024-11-07T12:32:21,472 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=87fb68174c52,34287,1730982740589 2024-11-07T12:32:21,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:32:21,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1001a3a8cfd0001, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:32:21,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:32:21,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1001a3a8cfd0001, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:32:21,473 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:32:21,473 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-07T12:32:21,473 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:32:21,473 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:32:21,474 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '87fb68174c52,38689,1730982740630' ***** 2024-11-07T12:32:21,474 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38689-0x1001a3a8cfd0001, quorum=127.0.0.1:54028, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:32:21,474 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-07T12:32:21,474 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:32:21,474 INFO [RS:0;87fb68174c52:38689 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-07T12:32:21,474 INFO [RS:0;87fb68174c52:38689 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-07T12:32:21,474 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-07T12:32:21,474 INFO [RS:0;87fb68174c52:38689 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-07T12:32:21,474 INFO [RS:0;87fb68174c52:38689 {}] regionserver.HRegionServer(959): stopping server 87fb68174c52,38689,1730982740630 2024-11-07T12:32:21,474 INFO [RS:0;87fb68174c52:38689 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:32:21,474 INFO [RS:0;87fb68174c52:38689 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;87fb68174c52:38689. 2024-11-07T12:32:21,474 DEBUG [RS:0;87fb68174c52:38689 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:32:21,474 DEBUG [RS:0;87fb68174c52:38689 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:32:21,474 INFO [RS:0;87fb68174c52:38689 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-07T12:32:21,474 INFO [RS:0;87fb68174c52:38689 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-07T12:32:21,474 INFO [RS:0;87fb68174c52:38689 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-07T12:32:21,474 INFO [RS:0;87fb68174c52:38689 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-07T12:32:21,475 INFO [RS:0;87fb68174c52:38689 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-07T12:32:21,475 DEBUG [RS:0;87fb68174c52:38689 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-07T12:32:21,475 DEBUG [RS:0;87fb68174c52:38689 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-07T12:32:21,475 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:32:21,475 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:32:21,475 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:32:21,475 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:32:21,475 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:32:21,475 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-07T12:32:21,490 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/data/hbase/meta/1588230740/.tmp/ns/7fe3bbb36806457fae34069aa3a31c22 is 43, key is default/ns:d/1730982741417/Put/seqid=0 2024-11-07T12:32:21,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741837_1013 (size=5153) 2024-11-07T12:32:21,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741837_1013 (size=5153) 2024-11-07T12:32:21,494 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/data/hbase/meta/1588230740/.tmp/ns/7fe3bbb36806457fae34069aa3a31c22 2024-11-07T12:32:21,499 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/data/hbase/meta/1588230740/.tmp/ns/7fe3bbb36806457fae34069aa3a31c22 as hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/data/hbase/meta/1588230740/ns/7fe3bbb36806457fae34069aa3a31c22 2024-11-07T12:32:21,503 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/data/hbase/meta/1588230740/ns/7fe3bbb36806457fae34069aa3a31c22, entries=2, sequenceid=6, filesize=5.0 K 2024-11-07T12:32:21,504 INFO 
[RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 29ms, sequenceid=6, compaction requested=false 2024-11-07T12:32:21,507 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-07T12:32:21,508 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T12:32:21,508 INFO [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:32:21,508 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730982741475Running coprocessor pre-close hooks at 1730982741475Disabling compacts and flushes for region at 1730982741475Disabling writes for close at 1730982741475Obtaining lock to block concurrent updates at 1730982741475Preparing flush snapshotting stores in 1588230740 at 1730982741475Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1730982741475Flushing stores of hbase:meta,,1.1588230740 at 1730982741476 (+1 ms)Flushing 1588230740/ns: creating writer at 1730982741476Flushing 1588230740/ns: appending metadata at 1730982741489 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1730982741490 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ce7f13d: reopening flushed file at 1730982741498 (+8 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 29ms, sequenceid=6, compaction requested=false at 1730982741504 (+6 ms)Writing region close event to WAL at 1730982741504Running coprocessor post-close hooks at 1730982741508 (+4 ms)Closed at 1730982741508 2024-11-07T12:32:21,508 DEBUG [RS_CLOSE_META-regionserver/87fb68174c52:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-07T12:32:21,675 INFO [RS:0;87fb68174c52:38689 {}] regionserver.HRegionServer(976): stopping server 87fb68174c52,38689,1730982740630; all regions closed. 
2024-11-07T12:32:21,675 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:21,675 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:21,676 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:21,676 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:21,676 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:21,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741834_1010 (size=1152) 2024-11-07T12:32:21,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741834_1010 (size=1152) 2024-11-07T12:32:21,680 DEBUG [RS:0;87fb68174c52:38689 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/oldWALs 2024-11-07T12:32:21,680 INFO [RS:0;87fb68174c52:38689 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87fb68174c52%2C38689%2C1730982740630.meta:.meta(num 1730982741381) 2024-11-07T12:32:21,680 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:21,680 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:21,680 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:21,681 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:21,681 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:32:21,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741833_1009 (size=93) 2024-11-07T12:32:21,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741833_1009 (size=93) 2024-11-07T12:32:21,684 DEBUG [RS:0;87fb68174c52:38689 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/oldWALs 2024-11-07T12:32:21,684 INFO [RS:0;87fb68174c52:38689 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87fb68174c52%2C38689%2C1730982740630:(num 1730982741003) 2024-11-07T12:32:21,684 DEBUG [RS:0;87fb68174c52:38689 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:32:21,684 INFO [RS:0;87fb68174c52:38689 {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:32:21,684 INFO [RS:0;87fb68174c52:38689 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:32:21,684 INFO [RS:0;87fb68174c52:38689 {}] hbase.ChoreService(370): Chore service for: regionserver/87fb68174c52:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-07T12:32:21,684 INFO [RS:0;87fb68174c52:38689 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:32:21,684 INFO [regionserver/87fb68174c52:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-07T12:32:21,684 INFO [RS:0;87fb68174c52:38689 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38689 2024-11-07T12:32:21,686 INFO [RS:0;87fb68174c52:38689 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:32:21,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1001a3a8cfd0001, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/87fb68174c52,38689,1730982740630 2024-11-07T12:32:21,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:32:21,688 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [87fb68174c52,38689,1730982740630] 2024-11-07T12:32:21,689 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/87fb68174c52,38689,1730982740630 already deleted, retry=false 2024-11-07T12:32:21,689 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 87fb68174c52,38689,1730982740630 expired; onlineServers=0 2024-11-07T12:32:21,689 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '87fb68174c52,34287,1730982740589' ***** 2024-11-07T12:32:21,689 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-07T12:32:21,689 INFO [M:0;87fb68174c52:34287 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:32:21,689 INFO [M:0;87fb68174c52:34287 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:32:21,689 DEBUG [M:0;87fb68174c52:34287 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-07T12:32:21,689 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-07T12:32:21,689 DEBUG [M:0;87fb68174c52:34287 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-07T12:32:21,689 DEBUG [master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.large.0-1730982740775 {}] cleaner.HFileCleaner(306): Exit Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.large.0-1730982740775,5,FailOnTimeoutGroup] 2024-11-07T12:32:21,689 DEBUG [master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.small.0-1730982740775 {}] cleaner.HFileCleaner(306): Exit Thread[master/87fb68174c52:0:becomeActiveMaster-HFileCleaner.small.0-1730982740775,5,FailOnTimeoutGroup] 2024-11-07T12:32:21,689 INFO [M:0;87fb68174c52:34287 {}] hbase.ChoreService(370): Chore service for: master/87fb68174c52:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-07T12:32:21,689 INFO [M:0;87fb68174c52:34287 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:32:21,689 DEBUG [M:0;87fb68174c52:34287 {}] master.HMaster(1795): Stopping service threads 2024-11-07T12:32:21,689 INFO [M:0;87fb68174c52:34287 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-07T12:32:21,689 INFO [M:0;87fb68174c52:34287 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:32:21,690 INFO [M:0;87fb68174c52:34287 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-07T12:32:21,690 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-07T12:32:21,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-07T12:32:21,691 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:32:21,691 DEBUG [M:0;87fb68174c52:34287 {}] zookeeper.ZKUtil(347): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-07T12:32:21,691 WARN [M:0;87fb68174c52:34287 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-07T12:32:21,691 INFO [M:0;87fb68174c52:34287 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/.lastflushedseqids 2024-11-07T12:32:21,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741838_1014 (size=108) 2024-11-07T12:32:21,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741838_1014 (size=108) 2024-11-07T12:32:21,696 INFO [M:0;87fb68174c52:34287 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-07T12:32:21,696 INFO [M:0;87fb68174c52:34287 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-07T12:32:21,696 DEBUG [M:0;87fb68174c52:34287 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:32:21,696 INFO [M:0;87fb68174c52:34287 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:32:21,696 DEBUG [M:0;87fb68174c52:34287 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:32:21,696 DEBUG [M:0;87fb68174c52:34287 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:32:21,696 DEBUG [M:0;87fb68174c52:34287 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:32:21,696 INFO [M:0;87fb68174c52:34287 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-07T12:32:21,712 DEBUG [M:0;87fb68174c52:34287 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a8b97783d823498b9a9c1c3e5dcdee42 is 82, key is hbase:meta,,1/info:regioninfo/1730982741405/Put/seqid=0 2024-11-07T12:32:21,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741839_1015 (size=5672) 2024-11-07T12:32:21,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741839_1015 (size=5672) 2024-11-07T12:32:21,717 INFO [M:0;87fb68174c52:34287 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a8b97783d823498b9a9c1c3e5dcdee42 2024-11-07T12:32:21,733 DEBUG [M:0;87fb68174c52:34287 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2fe15a8b33164932beac0f38b78ed576 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1730982741420/Put/seqid=0 2024-11-07T12:32:21,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741840_1016 (size=5275) 2024-11-07T12:32:21,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741840_1016 (size=5275) 2024-11-07T12:32:21,737 INFO [M:0;87fb68174c52:34287 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2fe15a8b33164932beac0f38b78ed576 2024-11-07T12:32:21,754 DEBUG [M:0;87fb68174c52:34287 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b9b11845806f4509ae7b330ebe4d2642 is 69, key is 87fb68174c52,38689,1730982740630/rs:state/1730982740862/Put/seqid=0 2024-11-07T12:32:21,758 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741841_1017 (size=5156) 2024-11-07T12:32:21,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741841_1017 (size=5156) 2024-11-07T12:32:21,761 INFO [M:0;87fb68174c52:34287 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b9b11845806f4509ae7b330ebe4d2642 2024-11-07T12:32:21,779 DEBUG [M:0;87fb68174c52:34287 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4294343dd85a4dc2b126a35c361f7c17 is 52, key is load_balancer_on/state:d/1730982741451/Put/seqid=0 2024-11-07T12:32:21,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741842_1018 (size=5056) 2024-11-07T12:32:21,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741842_1018 (size=5056) 2024-11-07T12:32:21,783 INFO [M:0;87fb68174c52:34287 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4294343dd85a4dc2b126a35c361f7c17 2024-11-07T12:32:21,788 DEBUG [M:0;87fb68174c52:34287 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a8b97783d823498b9a9c1c3e5dcdee42 as hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a8b97783d823498b9a9c1c3e5dcdee42 2024-11-07T12:32:21,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1001a3a8cfd0001, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:32:21,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1001a3a8cfd0001, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:32:21,788 INFO [RS:0;87fb68174c52:38689 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:32:21,788 INFO [RS:0;87fb68174c52:38689 {}] regionserver.HRegionServer(1031): Exiting; stopping=87fb68174c52,38689,1730982740630; zookeeper connection closed. 
2024-11-07T12:32:21,788 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@56e074ca {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@56e074ca 2024-11-07T12:32:21,788 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-07T12:32:21,792 INFO [M:0;87fb68174c52:34287 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a8b97783d823498b9a9c1c3e5dcdee42, entries=8, sequenceid=29, filesize=5.5 K 2024-11-07T12:32:21,792 DEBUG [M:0;87fb68174c52:34287 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2fe15a8b33164932beac0f38b78ed576 as hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2fe15a8b33164932beac0f38b78ed576 2024-11-07T12:32:21,796 INFO [M:0;87fb68174c52:34287 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2fe15a8b33164932beac0f38b78ed576, entries=3, sequenceid=29, filesize=5.2 K 2024-11-07T12:32:21,797 DEBUG [M:0;87fb68174c52:34287 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b9b11845806f4509ae7b330ebe4d2642 as hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b9b11845806f4509ae7b330ebe4d2642 2024-11-07T12:32:21,800 INFO [M:0;87fb68174c52:34287 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b9b11845806f4509ae7b330ebe4d2642, entries=1, sequenceid=29, filesize=5.0 K 2024-11-07T12:32:21,801 DEBUG [M:0;87fb68174c52:34287 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4294343dd85a4dc2b126a35c361f7c17 as hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4294343dd85a4dc2b126a35c361f7c17 2024-11-07T12:32:21,804 INFO [M:0;87fb68174c52:34287 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35233/user/jenkins/test-data/1404132f-423e-2a6e-60f3-c2b7a928383c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4294343dd85a4dc2b126a35c361f7c17, entries=1, sequenceid=29, filesize=4.9 K 2024-11-07T12:32:21,805 INFO [M:0;87fb68174c52:34287 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 109ms, sequenceid=29, compaction requested=false 2024-11-07T12:32:21,806 INFO [M:0;87fb68174c52:34287 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-07T12:32:21,807 DEBUG [M:0;87fb68174c52:34287 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730982741696Disabling compacts and flushes for region at 1730982741696Disabling writes for close at 1730982741696Obtaining lock to block concurrent updates at 1730982741696Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1730982741696Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1730982741697 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1730982741697Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1730982741698 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1730982741712 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1730982741712Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1730982741720 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1730982741733 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1730982741733Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1730982741741 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1730982741753 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1730982741753Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1730982741765 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1730982741778 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1730982741778Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4dc33a82: reopening flushed file at 1730982741787 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@633f2e37: reopening flushed file at 1730982741792 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@314dd39f: reopening flushed file at 1730982741796 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2875b582: reopening flushed file at 1730982741800 (+4 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 109ms, sequenceid=29, compaction requested=false at 1730982741805 (+5 ms)Writing region close event to WAL at 1730982741806 (+1 ms)Closed at 1730982741806
2024-11-07T12:32:21,807 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:32:21,807 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:32:21,807 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:32:21,807 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:32:21,807 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:32:21,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35887 is added to blk_1073741830_1006 (size=10311)
2024-11-07T12:32:21,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741830_1006 (size=10311)
2024-11-07T12:32:21,810 INFO [M:0;87fb68174c52:34287 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-07T12:32:21,810 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-07T12:32:21,810 INFO [M:0;87fb68174c52:34287 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34287
2024-11-07T12:32:21,810 INFO [M:0;87fb68174c52:34287 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-07T12:32:21,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39665/user/jenkins/test-data/198842d3-c225-1d12-e171-b697e78cc09c/WALs/87fb68174c52,45061,1730982564994/87fb68174c52%2C45061%2C1730982564994.1730982565406
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-07T12:32:21,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T12:32:21,912 INFO [M:0;87fb68174c52:34287 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-07T12:32:21,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1001a3a8cfd0000, quorum=127.0.0.1:54028, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T12:32:21,914 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@45235d1a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-07T12:32:21,914 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2527c84a{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-07T12:32:21,914 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-07T12:32:21,914 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e818af2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-07T12:32:21,914 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@9268180{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/hadoop.log.dir/,STOPPED}
2024-11-07T12:32:21,916 WARN [BP-2037743226-172.17.0.3-1730982739954 heartbeating to localhost/127.0.0.1:35233 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-07T12:32:21,916 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-07T12:32:21,916 WARN [BP-2037743226-172.17.0.3-1730982739954 heartbeating to localhost/127.0.0.1:35233 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2037743226-172.17.0.3-1730982739954 (Datanode Uuid b87e22c1-6edb-4956-999f-0258120d25d9) service to localhost/127.0.0.1:35233
2024-11-07T12:32:21,916 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-07T12:32:21,916 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/cluster_08ec72f8-9879-6904-f4c0-48d0cf367fe5/data/data3/current/BP-2037743226-172.17.0.3-1730982739954 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T12:32:21,917 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/cluster_08ec72f8-9879-6904-f4c0-48d0cf367fe5/data/data4/current/BP-2037743226-172.17.0.3-1730982739954 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T12:32:21,917 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-07T12:32:21,919 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@265f6a26{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-07T12:32:21,919 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7d6e4d22{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-07T12:32:21,919 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-07T12:32:21,919 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20042785{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-07T12:32:21,919 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b01e479{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/hadoop.log.dir/,STOPPED}
2024-11-07T12:32:21,921 WARN [BP-2037743226-172.17.0.3-1730982739954 heartbeating to localhost/127.0.0.1:35233 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-07T12:32:21,921 WARN [BP-2037743226-172.17.0.3-1730982739954 heartbeating to localhost/127.0.0.1:35233 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2037743226-172.17.0.3-1730982739954 (Datanode Uuid 1812218f-8319-47d0-8a39-c4601c9d059f) service to localhost/127.0.0.1:35233
2024-11-07T12:32:21,921 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-07T12:32:21,921 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-07T12:32:21,921 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/cluster_08ec72f8-9879-6904-f4c0-48d0cf367fe5/data/data1/current/BP-2037743226-172.17.0.3-1730982739954 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T12:32:21,922 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/cluster_08ec72f8-9879-6904-f4c0-48d0cf367fe5/data/data2/current/BP-2037743226-172.17.0.3-1730982739954 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T12:32:21,922 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-07T12:32:21,927 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5be97557{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-07T12:32:21,927 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6ce66601{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-07T12:32:21,928 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-07T12:32:21,928 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a024a47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-07T12:32:21,928 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1e07b1d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb89cdc5-27be-8fc8-9d8c-674ec8beb6c3/hadoop.log.dir/,STOPPED}
2024-11-07T12:32:21,934 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-07T12:32:21,947 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-07T12:32:21,957 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=270 (was 230)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35233 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35233 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:35233
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35233
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35233 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35233
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:35233
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: globalEventExecutor-1-21
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113)
    app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35233
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=538 (was 515) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=30 (was 30), ProcessCount=11 (was 11), AvailableMemoryMB=6331 (was 6348)