2024-11-11 06:58:37,109 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-11 06:58:37,126 main DEBUG Took 0.014116 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-11 06:58:37,126 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-11 06:58:37,127 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-11 06:58:37,128 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-11 06:58:37,129 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 06:58:37,136 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-11 06:58:37,147 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 06:58:37,149 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 06:58:37,149 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 06:58:37,150 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 06:58:37,150 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 06:58:37,150 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 06:58:37,151 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 06:58:37,151 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 06:58:37,152 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 06:58:37,152 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 06:58:37,153 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 06:58:37,153 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 06:58:37,154 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 06:58:37,154 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-11 06:58:37,154 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 06:58:37,154 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 06:58:37,155 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 06:58:37,155 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 06:58:37,155 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 06:58:37,156 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 06:58:37,156 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 06:58:37,156 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 06:58:37,157 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 06:58:37,157 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 06:58:37,158 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 06:58:37,158 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-11 06:58:37,159 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 06:58:37,160 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-11 06:58:37,162 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-11 06:58:37,162 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-11 06:58:37,164 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-11 06:58:37,164 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-11 06:58:37,172 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-11 06:58:37,174 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-11 06:58:37,176 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-11 06:58:37,176 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-11 06:58:37,176 main DEBUG createAppenders(={Console}) 2024-11-11 06:58:37,177 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-11 06:58:37,177 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-11 06:58:37,178 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-11 06:58:37,178 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-11 06:58:37,178 main DEBUG OutputStream closed 2024-11-11 06:58:37,178 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-11 06:58:37,179 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-11 06:58:37,179 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-11 06:58:37,245 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-11 06:58:37,247 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-11 06:58:37,248 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-11 06:58:37,248 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-11 06:58:37,249 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-11 06:58:37,249 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-11 06:58:37,249 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-11 06:58:37,250 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-11 06:58:37,250 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-11 06:58:37,250 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-11 06:58:37,251 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-11 06:58:37,251 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-11 06:58:37,251 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-11 06:58:37,251 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-11 06:58:37,252 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-11 06:58:37,252 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-11 06:58:37,252 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-11 06:58:37,253 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-11 06:58:37,255 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-11 06:58:37,256 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-11 06:58:37,256 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-11 06:58:37,257 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-11T06:58:37,520 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3 2024-11-11 06:58:37,523 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-11 06:58:37,523 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-11T06:58:37,533 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-11T06:58:37,566 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=10, ProcessCount=11, AvailableMemoryMB=10117 2024-11-11T06:58:37,569 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T06:58:37,583 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/cluster_b61b4310-de07-4989-6b6d-aefb5d03a426, deleteOnExit=true 2024-11-11T06:58:37,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T06:58:37,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/test.cache.data in system properties and HBase conf 2024-11-11T06:58:37,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T06:58:37,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/hadoop.log.dir in system properties and HBase conf 2024-11-11T06:58:37,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T06:58:37,587 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T06:58:37,587 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T06:58:37,670 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-11T06:58:37,755 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-11T06:58:37,758 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T06:58:37,759 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T06:58:37,759 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T06:58:37,760 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T06:58:37,760 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T06:58:37,761 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T06:58:37,761 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T06:58:37,761 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T06:58:37,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T06:58:37,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/nfs.dump.dir in system properties and HBase conf 2024-11-11T06:58:37,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/java.io.tmpdir in system properties and HBase conf 2024-11-11T06:58:37,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T06:58:37,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T06:58:37,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T06:58:38,220 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T06:58:38,616 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-11T06:58:38,694 INFO [Time-limited test {}] log.Log(170): Logging initialized @2273ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-11T06:58:38,770 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T06:58:38,832 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T06:58:38,852 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T06:58:38,852 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T06:58:38,853 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T06:58:38,867 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T06:58:38,869 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/hadoop.log.dir/,AVAILABLE} 2024-11-11T06:58:38,870 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T06:58:39,074 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/java.io.tmpdir/jetty-localhost-39937-hadoop-hdfs-3_4_1-tests_jar-_-any-1314761709439925357/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T06:58:39,081 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:39937} 2024-11-11T06:58:39,081 INFO [Time-limited test {}] server.Server(415): Started @2660ms 2024-11-11T06:58:39,115 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T06:58:39,465 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T06:58:39,472 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T06:58:39,473 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T06:58:39,473 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T06:58:39,473 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T06:58:39,474 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/hadoop.log.dir/,AVAILABLE} 2024-11-11T06:58:39,474 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T06:58:39,595 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/java.io.tmpdir/jetty-localhost-34473-hadoop-hdfs-3_4_1-tests_jar-_-any-10203351251088422678/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T06:58:39,596 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:34473} 2024-11-11T06:58:39,596 INFO [Time-limited test {}] server.Server(415): Started @3175ms 2024-11-11T06:58:39,651 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T06:58:39,773 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T06:58:39,778 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T06:58:39,780 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T06:58:39,780 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T06:58:39,780 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T06:58:39,781 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/hadoop.log.dir/,AVAILABLE} 2024-11-11T06:58:39,782 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T06:58:39,916 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/java.io.tmpdir/jetty-localhost-43287-hadoop-hdfs-3_4_1-tests_jar-_-any-7683922153223719983/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T06:58:39,917 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:43287} 2024-11-11T06:58:39,917 INFO [Time-limited test {}] server.Server(415): Started @3496ms 2024-11-11T06:58:39,920 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-11T06:58:40,116 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/cluster_b61b4310-de07-4989-6b6d-aefb5d03a426/data/data2/current/BP-1500220386-172.17.0.2-1731308318312/current, will proceed with Du for space computation calculation, 2024-11-11T06:58:40,116 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/cluster_b61b4310-de07-4989-6b6d-aefb5d03a426/data/data3/current/BP-1500220386-172.17.0.2-1731308318312/current, will proceed with Du for space computation calculation, 2024-11-11T06:58:40,116 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/cluster_b61b4310-de07-4989-6b6d-aefb5d03a426/data/data4/current/BP-1500220386-172.17.0.2-1731308318312/current, will proceed with Du for space computation calculation, 2024-11-11T06:58:40,116 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/cluster_b61b4310-de07-4989-6b6d-aefb5d03a426/data/data1/current/BP-1500220386-172.17.0.2-1731308318312/current, will proceed with Du for space computation calculation, 2024-11-11T06:58:40,174 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T06:58:40,175 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T06:58:40,273 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2d7b9e676cdc8e31 with lease ID 0x9b1171b03a0a6ca8: Processing first storage report for DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2 from datanode DatanodeRegistration(127.0.0.1:33949, datanodeUuid=818f538e-f155-43f3-aa2e-cc37f287c309, infoPort=45219, infoSecurePort=0, ipcPort=42223, storageInfo=lv=-57;cid=testClusterID;nsid=1575807515;c=1731308318312) 2024-11-11T06:58:40,275 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2d7b9e676cdc8e31 with lease ID 0x9b1171b03a0a6ca8: from storage DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2 node DatanodeRegistration(127.0.0.1:33949, datanodeUuid=818f538e-f155-43f3-aa2e-cc37f287c309, infoPort=45219, infoSecurePort=0, ipcPort=42223, storageInfo=lv=-57;cid=testClusterID;nsid=1575807515;c=1731308318312), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-11T06:58:40,275 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x61f610544d28d9b5 with lease ID 0x9b1171b03a0a6ca7: Processing first storage report for DS-7e3723af-628a-46be-a32f-fe46cfe29692 from datanode DatanodeRegistration(127.0.0.1:37861, datanodeUuid=bd568af2-f574-4267-a040-ae0e1b3e2746, infoPort=35447, infoSecurePort=0, ipcPort=37195, storageInfo=lv=-57;cid=testClusterID;nsid=1575807515;c=1731308318312) 2024-11-11T06:58:40,275 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x61f610544d28d9b5 with lease ID 0x9b1171b03a0a6ca7: from storage DS-7e3723af-628a-46be-a32f-fe46cfe29692 node DatanodeRegistration(127.0.0.1:37861, datanodeUuid=bd568af2-f574-4267-a040-ae0e1b3e2746, infoPort=35447, infoSecurePort=0, ipcPort=37195, storageInfo=lv=-57;cid=testClusterID;nsid=1575807515;c=1731308318312), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T06:58:40,276 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2d7b9e676cdc8e31 with lease ID 0x9b1171b03a0a6ca8: Processing first storage report for DS-6d5b663b-9ad9-4b2f-93e3-5c4eabeb5164 from datanode DatanodeRegistration(127.0.0.1:33949, datanodeUuid=818f538e-f155-43f3-aa2e-cc37f287c309, infoPort=45219, infoSecurePort=0, ipcPort=42223, storageInfo=lv=-57;cid=testClusterID;nsid=1575807515;c=1731308318312) 2024-11-11T06:58:40,276 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2d7b9e676cdc8e31 with lease ID 0x9b1171b03a0a6ca8: from storage DS-6d5b663b-9ad9-4b2f-93e3-5c4eabeb5164 node DatanodeRegistration(127.0.0.1:33949, datanodeUuid=818f538e-f155-43f3-aa2e-cc37f287c309, infoPort=45219, infoSecurePort=0, ipcPort=42223, storageInfo=lv=-57;cid=testClusterID;nsid=1575807515;c=1731308318312), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T06:58:40,277 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x61f610544d28d9b5 with lease ID 0x9b1171b03a0a6ca7: Processing first storage report for DS-14d0e578-11c2-491a-abd8-868608799f5e from datanode DatanodeRegistration(127.0.0.1:37861, datanodeUuid=bd568af2-f574-4267-a040-ae0e1b3e2746, infoPort=35447, infoSecurePort=0, ipcPort=37195, storageInfo=lv=-57;cid=testClusterID;nsid=1575807515;c=1731308318312) 2024-11-11T06:58:40,277 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x61f610544d28d9b5 with lease ID 0x9b1171b03a0a6ca7: from storage DS-14d0e578-11c2-491a-abd8-868608799f5e node DatanodeRegistration(127.0.0.1:37861, datanodeUuid=bd568af2-f574-4267-a040-ae0e1b3e2746, infoPort=35447, infoSecurePort=0, ipcPort=37195, storageInfo=lv=-57;cid=testClusterID;nsid=1575807515;c=1731308318312), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-11T06:58:40,305 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3 2024-11-11T06:58:40,385 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/cluster_b61b4310-de07-4989-6b6d-aefb5d03a426/zookeeper_0, clientPort=58137, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/cluster_b61b4310-de07-4989-6b6d-aefb5d03a426/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/cluster_b61b4310-de07-4989-6b6d-aefb5d03a426/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T06:58:40,396 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58137 2024-11-11T06:58:40,407 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T06:58:40,410 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T06:58:40,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741825_1001 (size=7) 2024-11-11T06:58:40,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741825_1001 (size=7) 2024-11-11T06:58:41,053 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5 with version=8 2024-11-11T06:58:41,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/hbase-staging 2024-11-11T06:58:41,145 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-11T06:58:41,395 INFO [Time-limited test {}] client.ConnectionUtils(128): master/f334e8b0fbdf:0 server-side Connection retries=45 2024-11-11T06:58:41,406 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T06:58:41,406 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T06:58:41,411 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T06:58:41,411 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T06:58:41,411 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T06:58:41,548 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T06:58:41,609 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-11T06:58:41,618 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-11T06:58:41,622 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T06:58:41,648 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 19733 (auto-detected) 2024-11-11T06:58:41,649 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-11T06:58:41,667 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39009 2024-11-11T06:58:41,688 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39009 connecting to ZooKeeper ensemble=127.0.0.1:58137 2024-11-11T06:58:41,720 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:390090x0, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T06:58:41,723 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39009-0x10105d260fd0000 connected 2024-11-11T06:58:41,753 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T06:58:41,756 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T06:58:41,766 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T06:58:41,770 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5, hbase.cluster.distributed=false 2024-11-11T06:58:41,793 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T06:58:41,798 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39009 2024-11-11T06:58:41,798 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39009 2024-11-11T06:58:41,799 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39009 2024-11-11T06:58:41,799 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39009 2024-11-11T06:58:41,800 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39009 2024-11-11T06:58:41,920 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f334e8b0fbdf:0 server-side Connection retries=45 2024-11-11T06:58:41,922 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T06:58:41,922 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T06:58:41,922 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T06:58:41,923 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T06:58:41,923 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T06:58:41,927 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T06:58:41,930 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T06:58:41,931 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42887 2024-11-11T06:58:41,933 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42887 connecting to ZooKeeper ensemble=127.0.0.1:58137 2024-11-11T06:58:41,935 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T06:58:41,939 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T06:58:41,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:428870x0, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T06:58:41,948 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42887-0x10105d260fd0001 connected 2024-11-11T06:58:41,948 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:42887-0x10105d260fd0001, quorum=127.0.0.1:58137, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T06:58:41,954 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T06:58:41,963 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T06:58:41,965 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42887-0x10105d260fd0001, quorum=127.0.0.1:58137, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T06:58:41,970 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42887-0x10105d260fd0001, quorum=127.0.0.1:58137, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T06:58:41,971 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42887 2024-11-11T06:58:41,972 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42887 2024-11-11T06:58:41,973 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42887 2024-11-11T06:58:41,977 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42887 2024-11-11T06:58:41,979 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42887 2024-11-11T06:58:41,994 DEBUG [M:0;f334e8b0fbdf:39009 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;f334e8b0fbdf:39009 2024-11-11T06:58:41,995 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/f334e8b0fbdf,39009,1731308321197 2024-11-11T06:58:42,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T06:58:42,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42887-0x10105d260fd0001, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T06:58:42,005 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/f334e8b0fbdf,39009,1731308321197 2024-11-11T06:58:42,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T06:58:42,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42887-0x10105d260fd0001, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T06:58:42,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42887-0x10105d260fd0001, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-11T06:58:42,029 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T06:58:42,030 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/f334e8b0fbdf,39009,1731308321197 from backup master directory 2024-11-11T06:58:42,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42887-0x10105d260fd0001, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T06:58:42,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/f334e8b0fbdf,39009,1731308321197 2024-11-11T06:58:42,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T06:58:42,035 WARN [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T06:58:42,035 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=f334e8b0fbdf,39009,1731308321197 2024-11-11T06:58:42,037 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-11T06:58:42,039 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-11T06:58:42,095 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/hbase.id] with ID: f898c2ca-1631-48b2-964a-808e60d7f388 2024-11-11T06:58:42,095 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/.tmp/hbase.id 2024-11-11T06:58:42,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741826_1002 (size=42) 2024-11-11T06:58:42,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741826_1002 (size=42) 2024-11-11T06:58:42,109 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/.tmp/hbase.id]:[hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/hbase.id] 2024-11-11T06:58:42,156 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T06:58:42,161 INFO 
[master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T06:58:42,181 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-11-11T06:58:42,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T06:58:42,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42887-0x10105d260fd0001, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T06:58:42,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741827_1003 (size=196) 2024-11-11T06:58:42,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741827_1003 (size=196) 2024-11-11T06:58:42,219 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T06:58:42,222 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T06:58:42,228 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T06:58:42,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741828_1004 (size=1189) 2024-11-11T06:58:42,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741828_1004 (size=1189) 2024-11-11T06:58:42,282 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store 2024-11-11T06:58:42,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741829_1005 (size=34) 2024-11-11T06:58:42,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741829_1005 (size=34) 2024-11-11T06:58:42,308 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-11T06:58:42,312 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T06:58:42,314 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T06:58:42,314 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T06:58:42,314 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T06:58:42,316 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T06:58:42,317 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-11T06:58:42,317 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T06:58:42,319 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731308322314Disabling compacts and flushes for region at 1731308322314Disabling writes for close at 1731308322316 (+2 ms)Writing region close event to WAL at 1731308322317 (+1 ms)Closed at 1731308322317 2024-11-11T06:58:42,321 WARN [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store/.initializing 2024-11-11T06:58:42,321 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/WALs/f334e8b0fbdf,39009,1731308321197 2024-11-11T06:58:42,344 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f334e8b0fbdf%2C39009%2C1731308321197, suffix=, logDir=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/WALs/f334e8b0fbdf,39009,1731308321197, archiveDir=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/oldWALs, maxLogs=10 2024-11-11T06:58:42,354 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C39009%2C1731308321197.1731308322349 2024-11-11T06:58:42,374 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/WALs/f334e8b0fbdf,39009,1731308321197/f334e8b0fbdf%2C39009%2C1731308321197.1731308322349 2024-11-11T06:58:42,386 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35447:35447),(127.0.0.1/127.0.0.1:45219:45219)] 2024-11-11T06:58:42,387 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T06:58:42,387 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T06:58:42,390 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T06:58:42,392 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T06:58:42,429 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T06:58:42,456 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): 
size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T06:58:42,460 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T06:58:42,462 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T06:58:42,463 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T06:58:42,466 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T06:58:42,466 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T06:58:42,467 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T06:58:42,468 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T06:58:42,470 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T06:58:42,470 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T06:58:42,471 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T06:58:42,471 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T06:58:42,474 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T06:58:42,474 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T06:58:42,475 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T06:58:42,475 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T06:58:42,478 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T06:58:42,479 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T06:58:42,484 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster 
{}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T06:58:42,484 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T06:58:42,488 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T06:58:42,491 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T06:58:42,495 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T06:58:42,496 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=753635, jitterRate=-0.04170413315296173}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T06:58:42,502 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731308322404Initializing all the Stores at 1731308322406 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308322406Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308322407 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308322407Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308322408 (+1 ms)Cleaning up temporary data from old regions at 1731308322485 (+77 ms)Region opened successfully at 1731308322502 (+17 ms) 2024-11-11T06:58:42,503 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T06:58:42,538 DEBUG 
[master/f334e8b0fbdf:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23b5b523, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f334e8b0fbdf/172.17.0.2:0 2024-11-11T06:58:42,571 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T06:58:42,583 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T06:58:42,583 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T06:58:42,586 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T06:58:42,587 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-11T06:58:42,592 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-11T06:58:42,592 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T06:58:42,617 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T06:58:42,626 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T06:58:42,628 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T06:58:42,630 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T06:58:42,632 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T06:58:42,635 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T06:58:42,637 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T06:58:42,640 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T06:58:42,642 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T06:58:42,643 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): 
master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T06:58:42,644 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T06:58:42,661 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T06:58:42,662 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T06:58:42,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T06:58:42,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42887-0x10105d260fd0001, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T06:58:42,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T06:58:42,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42887-0x10105d260fd0001, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T06:58:42,670 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=f334e8b0fbdf,39009,1731308321197, sessionid=0x10105d260fd0000, setting cluster-up flag (Was=false) 2024-11-11T06:58:42,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T06:58:42,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42887-0x10105d260fd0001, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T06:58:42,690 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T06:58:42,692 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f334e8b0fbdf,39009,1731308321197 2024-11-11T06:58:42,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T06:58:42,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42887-0x10105d260fd0001, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T06:58:42,705 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): 
Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T06:58:42,706 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f334e8b0fbdf,39009,1731308321197 2024-11-11T06:58:42,712 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T06:58:42,783 INFO [RS:0;f334e8b0fbdf:42887 {}] regionserver.HRegionServer(746): ClusterId : f898c2ca-1631-48b2-964a-808e60d7f388 2024-11-11T06:58:42,786 DEBUG [RS:0;f334e8b0fbdf:42887 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T06:58:42,785 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T06:58:42,790 DEBUG [RS:0;f334e8b0fbdf:42887 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T06:58:42,790 DEBUG [RS:0;f334e8b0fbdf:42887 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T06:58:42,795 DEBUG [RS:0;f334e8b0fbdf:42887 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T06:58:42,795 DEBUG [RS:0;f334e8b0fbdf:42887 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8410aab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f334e8b0fbdf/172.17.0.2:0 2024-11-11T06:58:42,797 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T06:58:42,804 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T06:58:42,809 DEBUG [RS:0;f334e8b0fbdf:42887 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;f334e8b0fbdf:42887 2024-11-11T06:58:42,811 INFO [RS:0;f334e8b0fbdf:42887 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T06:58:42,811 INFO [RS:0;f334e8b0fbdf:42887 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T06:58:42,812 DEBUG [RS:0;f334e8b0fbdf:42887 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-11T06:58:42,809 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: f334e8b0fbdf,39009,1731308321197 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T06:58:42,814 INFO [RS:0;f334e8b0fbdf:42887 {}] regionserver.HRegionServer(2659): reportForDuty to master=f334e8b0fbdf,39009,1731308321197 with port=42887, startcode=1731308321879 2024-11-11T06:58:42,817 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T06:58:42,817 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T06:58:42,817 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T06:58:42,817 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T06:58:42,817 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/f334e8b0fbdf:0, corePoolSize=10, maxPoolSize=10 2024-11-11T06:58:42,818 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T06:58:42,818 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=2, maxPoolSize=2 2024-11-11T06:58:42,818 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T06:58:42,822 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731308352821 2024-11-11T06:58:42,823 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T06:58:42,824 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T06:58:42,824 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T06:58:42,824 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T06:58:42,827 DEBUG [RS:0;f334e8b0fbdf:42887 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T06:58:42,828 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T06:58:42,828 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T06:58:42,829 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T06:58:42,829 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T06:58:42,830 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T06:58:42,830 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T06:58:42,830 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-11T06:58:42,834 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T06:58:42,835 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T06:58:42,836 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T06:58:42,839 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T06:58:42,839 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T06:58:42,842 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.large.0-1731308322841,5,FailOnTimeoutGroup] 2024-11-11T06:58:42,852 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.small.0-1731308322842,5,FailOnTimeoutGroup] 2024-11-11T06:58:42,853 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T06:58:42,853 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T06:58:42,855 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T06:58:42,855 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-11T06:58:42,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741831_1007 (size=1321) 2024-11-11T06:58:42,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741831_1007 (size=1321) 2024-11-11T06:58:42,859 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T06:58:42,860 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5 2024-11-11T06:58:42,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741832_1008 (size=32) 2024-11-11T06:58:42,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741832_1008 (size=32) 2024-11-11T06:58:42,881 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T06:58:42,883 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T06:58:42,887 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T06:58:42,887 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T06:58:42,888 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T06:58:42,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T06:58:42,892 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T06:58:42,892 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T06:58:42,893 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T06:58:42,893 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T06:58:42,896 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T06:58:42,896 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T06:58:42,897 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T06:58:42,898 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T06:58:42,900 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T06:58:42,901 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T06:58:42,902 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T06:58:42,902 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T06:58:42,903 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/hbase/meta/1588230740 2024-11-11T06:58:42,904 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/hbase/meta/1588230740 2024-11-11T06:58:42,907 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T06:58:42,907 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T06:58:42,909 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-11T06:58:42,909 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56991, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T06:58:42,912 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T06:58:42,917 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39009 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f334e8b0fbdf,42887,1731308321879 2024-11-11T06:58:42,919 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T06:58:42,920 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39009 {}] master.ServerManager(517): Registering regionserver=f334e8b0fbdf,42887,1731308321879 2024-11-11T06:58:42,921 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=776290, jitterRate=-0.012896955013275146}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T06:58:42,925 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731308322881Initializing all the Stores at 1731308322883 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308322883Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308322883Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308322883Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308322883Cleaning up temporary data from old regions at 1731308322907 (+24 ms)Region opened successfully at 1731308322925 (+18 ms) 2024-11-11T06:58:42,925 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T06:58:42,925 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T06:58:42,926 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 
2024-11-11T06:58:42,926 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T06:58:42,926 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T06:58:42,932 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T06:58:42,932 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731308322925Disabling compacts and flushes for region at 1731308322925Disabling writes for close at 1731308322926 (+1 ms)Writing region close event to WAL at 1731308322932 (+6 ms)Closed at 1731308322932 2024-11-11T06:58:42,935 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T06:58:42,935 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T06:58:42,936 DEBUG [RS:0;f334e8b0fbdf:42887 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5 2024-11-11T06:58:42,936 DEBUG [RS:0;f334e8b0fbdf:42887 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45731 2024-11-11T06:58:42,936 DEBUG [RS:0;f334e8b0fbdf:42887 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T06:58:42,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T06:58:42,941 DEBUG [RS:0;f334e8b0fbdf:42887 {}] zookeeper.ZKUtil(111): regionserver:42887-0x10105d260fd0001, quorum=127.0.0.1:58137, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f334e8b0fbdf,42887,1731308321879 2024-11-11T06:58:42,941 WARN [RS:0;f334e8b0fbdf:42887 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-11T06:58:42,942 INFO [RS:0;f334e8b0fbdf:42887 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T06:58:42,942 DEBUG [RS:0;f334e8b0fbdf:42887 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879 2024-11-11T06:58:42,943 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T06:58:42,944 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f334e8b0fbdf,42887,1731308321879] 2024-11-11T06:58:42,954 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T06:58:42,957 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T06:58:42,970 INFO [RS:0;f334e8b0fbdf:42887 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T06:58:42,985 INFO [RS:0;f334e8b0fbdf:42887 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T06:58:42,990 INFO [RS:0;f334e8b0fbdf:42887 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T06:58:42,990 INFO [RS:0;f334e8b0fbdf:42887 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T06:58:42,991 INFO [RS:0;f334e8b0fbdf:42887 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T06:58:42,998 INFO [RS:0;f334e8b0fbdf:42887 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T06:58:42,999 INFO [RS:0;f334e8b0fbdf:42887 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-11T06:58:43,000 DEBUG [RS:0;f334e8b0fbdf:42887 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T06:58:43,000 DEBUG [RS:0;f334e8b0fbdf:42887 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T06:58:43,000 DEBUG [RS:0;f334e8b0fbdf:42887 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T06:58:43,000 DEBUG [RS:0;f334e8b0fbdf:42887 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T06:58:43,001 DEBUG [RS:0;f334e8b0fbdf:42887 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T06:58:43,001 DEBUG [RS:0;f334e8b0fbdf:42887 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f334e8b0fbdf:0, corePoolSize=2, maxPoolSize=2 2024-11-11T06:58:43,001 DEBUG [RS:0;f334e8b0fbdf:42887 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T06:58:43,001 DEBUG [RS:0;f334e8b0fbdf:42887 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T06:58:43,002 DEBUG [RS:0;f334e8b0fbdf:42887 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T06:58:43,002 DEBUG [RS:0;f334e8b0fbdf:42887 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T06:58:43,002 DEBUG [RS:0;f334e8b0fbdf:42887 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T06:58:43,002 DEBUG [RS:0;f334e8b0fbdf:42887 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T06:58:43,002 DEBUG [RS:0;f334e8b0fbdf:42887 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f334e8b0fbdf:0, corePoolSize=3, maxPoolSize=3 2024-11-11T06:58:43,003 DEBUG [RS:0;f334e8b0fbdf:42887 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0, corePoolSize=3, maxPoolSize=3 2024-11-11T06:58:43,004 INFO [RS:0;f334e8b0fbdf:42887 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T06:58:43,004 INFO [RS:0;f334e8b0fbdf:42887 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T06:58:43,004 INFO [RS:0;f334e8b0fbdf:42887 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T06:58:43,004 INFO [RS:0;f334e8b0fbdf:42887 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-11T06:58:43,004 INFO [RS:0;f334e8b0fbdf:42887 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T06:58:43,005 INFO [RS:0;f334e8b0fbdf:42887 {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,42887,1731308321879-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T06:58:43,024 INFO [RS:0;f334e8b0fbdf:42887 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T06:58:43,026 INFO [RS:0;f334e8b0fbdf:42887 {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,42887,1731308321879-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T06:58:43,026 INFO [RS:0;f334e8b0fbdf:42887 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T06:58:43,026 INFO [RS:0;f334e8b0fbdf:42887 {}] regionserver.Replication(171): f334e8b0fbdf,42887,1731308321879 started 2024-11-11T06:58:43,044 INFO [RS:0;f334e8b0fbdf:42887 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T06:58:43,045 INFO [RS:0;f334e8b0fbdf:42887 {}] regionserver.HRegionServer(1482): Serving as f334e8b0fbdf,42887,1731308321879, RpcServer on f334e8b0fbdf/172.17.0.2:42887, sessionid=0x10105d260fd0001 2024-11-11T06:58:43,046 DEBUG [RS:0;f334e8b0fbdf:42887 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T06:58:43,046 DEBUG [RS:0;f334e8b0fbdf:42887 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f334e8b0fbdf,42887,1731308321879 2024-11-11T06:58:43,046 DEBUG [RS:0;f334e8b0fbdf:42887 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f334e8b0fbdf,42887,1731308321879' 2024-11-11T06:58:43,046 DEBUG [RS:0;f334e8b0fbdf:42887 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T06:58:43,047 DEBUG [RS:0;f334e8b0fbdf:42887 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T06:58:43,048 DEBUG [RS:0;f334e8b0fbdf:42887 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T06:58:43,048 DEBUG [RS:0;f334e8b0fbdf:42887 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T06:58:43,048 DEBUG [RS:0;f334e8b0fbdf:42887 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager f334e8b0fbdf,42887,1731308321879 2024-11-11T06:58:43,048 DEBUG [RS:0;f334e8b0fbdf:42887 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f334e8b0fbdf,42887,1731308321879' 2024-11-11T06:58:43,048 DEBUG [RS:0;f334e8b0fbdf:42887 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T06:58:43,049 DEBUG [RS:0;f334e8b0fbdf:42887 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T06:58:43,049 DEBUG [RS:0;f334e8b0fbdf:42887 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T06:58:43,049 INFO [RS:0;f334e8b0fbdf:42887 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T06:58:43,050 INFO [RS:0;f334e8b0fbdf:42887 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-11T06:58:43,108 WARN [f334e8b0fbdf:39009 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-11T06:58:43,158 INFO [RS:0;f334e8b0fbdf:42887 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f334e8b0fbdf%2C42887%2C1731308321879, suffix=, logDir=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879, archiveDir=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/oldWALs, maxLogs=32 2024-11-11T06:58:43,160 INFO [RS:0;f334e8b0fbdf:42887 {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C42887%2C1731308321879.1731308323160 2024-11-11T06:58:43,169 INFO [RS:0;f334e8b0fbdf:42887 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308323160 2024-11-11T06:58:43,170 DEBUG [RS:0;f334e8b0fbdf:42887 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35447:35447),(127.0.0.1/127.0.0.1:45219:45219)] 2024-11-11T06:58:43,361 DEBUG [f334e8b0fbdf:39009 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T06:58:43,374 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=f334e8b0fbdf,42887,1731308321879 2024-11-11T06:58:43,380 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f334e8b0fbdf,42887,1731308321879, state=OPENING 2024-11-11T06:58:43,385 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T06:58:43,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T06:58:43,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42887-0x10105d260fd0001, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T06:58:43,388 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T06:58:43,388 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T06:58:43,389 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T06:58:43,391 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=f334e8b0fbdf,42887,1731308321879}] 2024-11-11T06:58:43,567 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T06:58:43,571 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54901, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T06:58:43,581 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T06:58:43,582 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T06:58:43,585 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f334e8b0fbdf%2C42887%2C1731308321879.meta, suffix=.meta, logDir=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879, archiveDir=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/oldWALs, maxLogs=32 2024-11-11T06:58:43,587 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C42887%2C1731308321879.meta.1731308323587.meta 2024-11-11T06:58:43,594 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.meta.1731308323587.meta 2024-11-11T06:58:43,595 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45219:45219),(127.0.0.1/127.0.0.1:35447:35447)] 2024-11-11T06:58:43,596 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T06:58:43,598 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T06:58:43,601 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T06:58:43,605 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-11T06:58:43,610 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T06:58:43,611 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T06:58:43,611 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T06:58:43,611 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T06:58:43,614 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T06:58:43,616 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T06:58:43,616 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T06:58:43,617 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T06:58:43,617 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T06:58:43,619 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T06:58:43,619 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T06:58:43,620 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T06:58:43,620 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T06:58:43,621 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T06:58:43,621 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T06:58:43,622 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T06:58:43,622 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T06:58:43,623 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T06:58:43,624 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T06:58:43,624 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-11T06:58:43,624 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T06:58:43,626 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/hbase/meta/1588230740 2024-11-11T06:58:43,628 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/hbase/meta/1588230740 2024-11-11T06:58:43,631 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T06:58:43,631 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T06:58:43,632 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T06:58:43,634 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T06:58:43,636 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=731870, jitterRate=-0.06937943398952484}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T06:58:43,636 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T06:58:43,637 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731308323611Writing region info on filesystem at 1731308323612 (+1 ms)Initializing all the Stores at 1731308323614 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308323614Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308323614Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308323614Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308323614Cleaning up temporary data from old regions at 1731308323631 (+17 ms)Running coprocessor post-open hooks at 1731308323636 (+5 ms)Region opened successfully at 1731308323637 (+1 ms) 2024-11-11T06:58:43,645 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731308323558 2024-11-11T06:58:43,656 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T06:58:43,657 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T06:58:43,659 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=f334e8b0fbdf,42887,1731308321879 2024-11-11T06:58:43,661 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f334e8b0fbdf,42887,1731308321879, state=OPEN 2024-11-11T06:58:43,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42887-0x10105d260fd0001, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T06:58:43,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T06:58:43,668 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T06:58:43,668 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T06:58:43,669 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=f334e8b0fbdf,42887,1731308321879 2024-11-11T06:58:43,674 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T06:58:43,674 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=f334e8b0fbdf,42887,1731308321879 in 278 msec 2024-11-11T06:58:43,680 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T06:58:43,680 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 733 msec 2024-11-11T06:58:43,681 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T06:58:43,682 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T06:58:43,703 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T06:58:43,705 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f334e8b0fbdf,42887,1731308321879, seqNum=-1] 2024-11-11T06:58:43,728 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T06:58:43,730 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49533, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T06:58:43,750 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0080 sec 2024-11-11T06:58:43,751 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731308323751, completionTime=-1 2024-11-11T06:58:43,753 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T06:58:43,753 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-11T06:58:43,781 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-11T06:58:43,781 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731308383781 2024-11-11T06:58:43,781 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731308443781 2024-11-11T06:58:43,781 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 27 msec 2024-11-11T06:58:43,784 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,39009,1731308321197-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T06:58:43,784 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,39009,1731308321197-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T06:58:43,784 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,39009,1731308321197-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T06:58:43,786 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-f334e8b0fbdf:39009, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T06:58:43,786 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T06:58:43,786 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T06:58:43,792 DEBUG [master/f334e8b0fbdf:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T06:58:43,813 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.778sec 2024-11-11T06:58:43,814 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T06:58:43,816 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T06:58:43,817 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T06:58:43,818 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T06:58:43,818 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T06:58:43,819 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,39009,1731308321197-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T06:58:43,819 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,39009,1731308321197-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T06:58:43,828 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T06:58:43,829 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T06:58:43,829 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,39009,1731308321197-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T06:58:43,897 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4731d90b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T06:58:43,900 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-11T06:58:43,900 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-11T06:58:43,905 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request f334e8b0fbdf,39009,-1 for getting cluster id 2024-11-11T06:58:43,907 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T06:58:43,916 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f898c2ca-1631-48b2-964a-808e60d7f388' 2024-11-11T06:58:43,919 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T06:58:43,919 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f898c2ca-1631-48b2-964a-808e60d7f388" 2024-11-11T06:58:43,922 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6914c27c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T06:58:43,922 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f334e8b0fbdf,39009,-1] 2024-11-11T06:58:43,925 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T06:58:43,926 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T06:58:43,928 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58620, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T06:58:43,931 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2eb3700d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T06:58:43,931 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T06:58:43,941 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f334e8b0fbdf,42887,1731308321879, seqNum=-1] 2024-11-11T06:58:43,941 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T06:58:43,944 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46770, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T06:58:43,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=f334e8b0fbdf,39009,1731308321197 2024-11-11T06:58:43,988 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T06:58:43,997 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T06:58:44,001 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T06:58:44,010 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is f334e8b0fbdf,39009,1731308321197 2024-11-11T06:58:44,013 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@705df763 2024-11-11T06:58:44,014 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T06:58:44,017 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58634, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T06:58:44,020 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39009 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-11T06:58:44,020 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39009 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-11T06:58:44,025 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39009 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T06:58:44,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39009 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-11T06:58:44,036 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T06:58:44,038 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39009 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-11T06:58:44,038 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T06:58:44,041 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T06:58:44,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39009 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T06:58:44,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741835_1011 (size=389) 2024-11-11T06:58:44,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741835_1011 (size=389) 2024-11-11T06:58:44,081 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b1c3e262b7fed51be42f4986d6865f56, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5 2024-11-11T06:58:44,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741836_1012 (size=72) 2024-11-11T06:58:44,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741836_1012 (size=72) 2024-11-11T06:58:44,097 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T06:58:44,097 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing b1c3e262b7fed51be42f4986d6865f56, disabling compactions & flushes 2024-11-11T06:58:44,097 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56. 2024-11-11T06:58:44,097 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56. 2024-11-11T06:58:44,097 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56. after waiting 0 ms 2024-11-11T06:58:44,097 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56. 2024-11-11T06:58:44,097 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56. 2024-11-11T06:58:44,097 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for b1c3e262b7fed51be42f4986d6865f56: Waiting for close lock at 1731308324097Disabling compacts and flushes for region at 1731308324097Disabling writes for close at 1731308324097Writing region close event to WAL at 1731308324097Closed at 1731308324097 2024-11-11T06:58:44,100 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T06:58:44,105 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731308324100"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731308324100"}]},"ts":"1731308324100"} 2024-11-11T06:58:44,110 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-11T06:58:44,112 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T06:58:44,115 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731308324112"}]},"ts":"1731308324112"} 2024-11-11T06:58:44,120 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-11T06:58:44,122 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=b1c3e262b7fed51be42f4986d6865f56, ASSIGN}] 2024-11-11T06:58:44,124 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=b1c3e262b7fed51be42f4986d6865f56, ASSIGN 2024-11-11T06:58:44,126 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=b1c3e262b7fed51be42f4986d6865f56, ASSIGN; state=OFFLINE, location=f334e8b0fbdf,42887,1731308321879; forceNewPlan=false, retain=false 2024-11-11T06:58:44,278 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b1c3e262b7fed51be42f4986d6865f56, regionState=OPENING, regionLocation=f334e8b0fbdf,42887,1731308321879 2024-11-11T06:58:44,282 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=b1c3e262b7fed51be42f4986d6865f56, ASSIGN because future has completed 2024-11-11T06:58:44,283 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b1c3e262b7fed51be42f4986d6865f56, server=f334e8b0fbdf,42887,1731308321879}] 2024-11-11T06:58:44,444 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56. 
2024-11-11T06:58:44,445 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => b1c3e262b7fed51be42f4986d6865f56, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56.', STARTKEY => '', ENDKEY => ''} 2024-11-11T06:58:44,445 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling b1c3e262b7fed51be42f4986d6865f56 2024-11-11T06:58:44,445 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T06:58:44,445 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for b1c3e262b7fed51be42f4986d6865f56 2024-11-11T06:58:44,445 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for b1c3e262b7fed51be42f4986d6865f56 2024-11-11T06:58:44,448 INFO [StoreOpener-b1c3e262b7fed51be42f4986d6865f56-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b1c3e262b7fed51be42f4986d6865f56 2024-11-11T06:58:44,450 INFO [StoreOpener-b1c3e262b7fed51be42f4986d6865f56-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b1c3e262b7fed51be42f4986d6865f56 columnFamilyName info 2024-11-11T06:58:44,450 DEBUG [StoreOpener-b1c3e262b7fed51be42f4986d6865f56-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T06:58:44,451 INFO [StoreOpener-b1c3e262b7fed51be42f4986d6865f56-1 {}] regionserver.HStore(327): Store=b1c3e262b7fed51be42f4986d6865f56/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T06:58:44,451 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for b1c3e262b7fed51be42f4986d6865f56 2024-11-11T06:58:44,452 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56 2024-11-11T06:58:44,453 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56 2024-11-11T06:58:44,453 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for b1c3e262b7fed51be42f4986d6865f56 2024-11-11T06:58:44,453 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for b1c3e262b7fed51be42f4986d6865f56 2024-11-11T06:58:44,456 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for b1c3e262b7fed51be42f4986d6865f56 2024-11-11T06:58:44,459 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T06:58:44,460 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened b1c3e262b7fed51be42f4986d6865f56; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=693798, jitterRate=-0.11779098212718964}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T06:58:44,460 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b1c3e262b7fed51be42f4986d6865f56 2024-11-11T06:58:44,461 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for b1c3e262b7fed51be42f4986d6865f56: Running coprocessor pre-open hook at 1731308324445Writing region info on filesystem at 1731308324445Initializing all the Stores at 1731308324447 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308324447Cleaning up temporary data from old regions at 1731308324454 (+7 ms)Running coprocessor post-open hooks at 1731308324460 (+6 ms)Region opened successfully at 1731308324461 (+1 ms) 2024-11-11T06:58:44,463 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56., pid=6, masterSystemTime=1731308324437 2024-11-11T06:58:44,467 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56. 2024-11-11T06:58:44,467 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56. 2024-11-11T06:58:44,468 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b1c3e262b7fed51be42f4986d6865f56, regionState=OPEN, openSeqNum=2, regionLocation=f334e8b0fbdf,42887,1731308321879 2024-11-11T06:58:44,472 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b1c3e262b7fed51be42f4986d6865f56, server=f334e8b0fbdf,42887,1731308321879 because future has completed 2024-11-11T06:58:44,478 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T06:58:44,478 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure b1c3e262b7fed51be42f4986d6865f56, server=f334e8b0fbdf,42887,1731308321879 in 191 msec 2024-11-11T06:58:44,482 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T06:58:44,482 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=b1c3e262b7fed51be42f4986d6865f56, ASSIGN in 356 msec 2024-11-11T06:58:44,484 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T06:58:44,484 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731308324484"}]},"ts":"1731308324484"} 2024-11-11T06:58:44,487 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-11T06:58:44,488 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T06:58:44,492 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 461 msec 2024-11-11T06:58:49,113 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-11T06:58:49,177 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-11T06:58:49,179 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-11T06:58:51,606 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-11T06:58:51,606 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-11T06:58:51,608 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-11T06:58:51,608 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-11T06:58:51,609 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T06:58:51,609 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-11T06:58:51,609 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-11T06:58:51,609 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-11T06:58:54,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39009 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T06:58:54,126 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-11T06:58:54,129 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-11T06:58:54,135 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-11T06:58:54,135 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56. 
2024-11-11T06:58:54,136 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C42887%2C1731308321879.1731308334136 2024-11-11T06:58:54,146 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:58:54,146 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:58:54,146 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:58:54,146 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:58:54,146 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:58:54,147 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308323160 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308334136 2024-11-11T06:58:54,148 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45219:45219),(127.0.0.1/127.0.0.1:35447:35447)] 2024-11-11T06:58:54,148 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308323160 is not closed yet, will try archiving it next time 2024-11-11T06:58:54,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741833_1009 (size=451) 2024-11-11T06:58:54,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741833_1009 (size=451) 2024-11-11T06:58:54,152 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308323160 to hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/oldWALs/f334e8b0fbdf%2C42887%2C1731308321879.1731308323160 2024-11-11T06:58:54,158 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56., hostname=f334e8b0fbdf,42887,1731308321879, seqNum=2] 2024-11-11T06:59:06,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42887 {}] regionserver.HRegion(8855): Flush requested on b1c3e262b7fed51be42f4986d6865f56 2024-11-11T06:59:06,197 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b1c3e262b7fed51be42f4986d6865f56 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T06:59:06,275 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/.tmp/info/a2019e68b3b74057b3f26dc64b18f9d7 is 1080, key is row0001/info:/1731308334160/Put/seqid=0 2024-11-11T06:59:06,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741838_1014 (size=12509) 2024-11-11T06:59:06,288 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741838_1014 (size=12509) 2024-11-11T06:59:06,289 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/.tmp/info/a2019e68b3b74057b3f26dc64b18f9d7 2024-11-11T06:59:06,342 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/.tmp/info/a2019e68b3b74057b3f26dc64b18f9d7 as hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/a2019e68b3b74057b3f26dc64b18f9d7 2024-11-11T06:59:06,355 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/a2019e68b3b74057b3f26dc64b18f9d7, entries=7, sequenceid=11, filesize=12.2 K 2024-11-11T06:59:06,364 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for b1c3e262b7fed51be42f4986d6865f56 in 166ms, sequenceid=11, compaction requested=false 2024-11-11T06:59:06,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b1c3e262b7fed51be42f4986d6865f56: 2024-11-11T06:59:10,301 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
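The flush sequence traced above (memstore flushed to a .tmp HFile, then committed into the info/ store directory) follows a write-to-temp-then-rename pattern. A minimal, self-contained sketch of that pattern is below; the paths, file name, and class are illustrative only and are not the HBase API.

import java.io.IOException;
import java.nio.file.*;

public class TmpThenCommit {
    public static void main(String[] args) throws IOException {
        // Stand-ins for the region's store directory and its .tmp staging area.
        Path store = Files.createDirectories(Paths.get("store", "info"));
        Path tmpDir = Files.createDirectories(Paths.get("store", ".tmp"));

        // Phase 1: write the flushed data where readers never look.
        Path tmpFile = tmpDir.resolve("a2019e68-example");
        Files.write(tmpFile, "flushed cells".getBytes());

        // Phase 2: publish with a single same-filesystem rename, so readers see
        // either the old file set or the new one, never a partially written file.
        Path committed = store.resolve(tmpFile.getFileName());
        Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);

        System.out.println("committed " + committed);
    }
}

The rename is the commit point, which is why the log only reports the store file as "Added" with its final entry count and size after the "Committing" step.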
2024-11-11T06:59:14,206 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C42887%2C1731308321879.1731308354206 2024-11-11T06:59:14,415 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK], DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK]] 2024-11-11T06:59:14,415 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:14,415 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:14,415 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:14,415 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:14,415 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:14,416 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308334136 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308354206 2024-11-11T06:59:14,417 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35447:35447),(127.0.0.1/127.0.0.1:45219:45219)] 2024-11-11T06:59:14,417 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308334136 is not closed yet, will try archiving it next time 2024-11-11T06:59:14,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741837_1013 (size=12399) 2024-11-11T06:59:14,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741837_1013 (size=12399) 2024-11-11T06:59:14,621 INFO [FSHLog-0-hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5-prefix:f334e8b0fbdf,42887,1731308321879 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK], DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK]] 2024-11-11T06:59:16,825 INFO [FSHLog-0-hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5-prefix:f334e8b0fbdf,42887,1731308321879 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK], DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK]] 2024-11-11T06:59:19,029 INFO [FSHLog-0-hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5-prefix:f334e8b0fbdf,42887,1731308321879 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK], DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK]] 2024-11-11T06:59:21,233 INFO [FSHLog-0-hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5-prefix:f334e8b0fbdf,42887,1731308321879 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK], DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK]] 2024-11-11T06:59:21,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42887 {}] regionserver.HRegion(8855): Flush requested on b1c3e262b7fed51be42f4986d6865f56 2024-11-11T06:59:21,234 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b1c3e262b7fed51be42f4986d6865f56 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T06:59:21,436 INFO [FSHLog-0-hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5-prefix:f334e8b0fbdf,42887,1731308321879 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK], DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK]] 2024-11-11T06:59:21,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/.tmp/info/5469dd55fd684b2ca12a6b870f5fbb01 is 1080, key is row0008/info:/1731308348195/Put/seqid=0 2024-11-11T06:59:21,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741840_1016 (size=12509) 2024-11-11T06:59:21,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741840_1016 (size=12509) 2024-11-11T06:59:21,453 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/.tmp/info/5469dd55fd684b2ca12a6b870f5fbb01 2024-11-11T06:59:21,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/.tmp/info/5469dd55fd684b2ca12a6b870f5fbb01 as hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/5469dd55fd684b2ca12a6b870f5fbb01 2024-11-11T06:59:21,472 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/5469dd55fd684b2ca12a6b870f5fbb01, entries=7, sequenceid=21, filesize=12.2 K 2024-11-11T06:59:21,674 INFO [FSHLog-0-hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5-prefix:f334e8b0fbdf,42887,1731308321879 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK], DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK]] 2024-11-11T06:59:21,674 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for b1c3e262b7fed51be42f4986d6865f56 in 
440ms, sequenceid=21, compaction requested=false 2024-11-11T06:59:21,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b1c3e262b7fed51be42f4986d6865f56: 2024-11-11T06:59:21,674 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-11T06:59:21,674 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T06:59:21,675 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/a2019e68b3b74057b3f26dc64b18f9d7 because midkey is the same as first or last row 2024-11-11T06:59:23,438 INFO [FSHLog-0-hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5-prefix:f334e8b0fbdf,42887,1731308321879 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK], DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK]] 2024-11-11T06:59:24,470 INFO [master/f334e8b0fbdf:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-11T06:59:24,470 INFO [master/f334e8b0fbdf:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-11T06:59:25,642 INFO [FSHLog-0-hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5-prefix:f334e8b0fbdf,42887,1731308321879 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK], DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK]] 2024-11-11T06:59:25,644 WARN [FSHLog-0-hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5-prefix:f334e8b0fbdf,42887,1731308321879 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK], DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK]] 2024-11-11T06:59:25,645 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f334e8b0fbdf%2C42887%2C1731308321879:(num 1731308354206) roll requested 2024-11-11T06:59:25,646 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C42887%2C1731308321879.1731308365645 2024-11-11T06:59:25,853 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK], DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK]] 2024-11-11T06:59:25,854 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:25,854 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:25,854 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:25,854 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:25,854 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
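The slow-sync messages around this point show two separate roll triggers: a run of syncs above a slow-sync threshold (the "count=8, threshold=5" warning) and, later in the log, any single sync above a hard time limit ("time=5006 ms, threshold=5000 ms"). A hypothetical sketch of that trigger logic follows; the names and thresholds are illustrative, not real HBase configuration keys.

public class SlowSyncRollTracker {
    private final long slowSyncMs;        // a sync slower than this counts as "slow"
    private final long rollOnSyncMs;      // a single sync this slow forces a roll
    private final int slowSyncRollCount;  // this many slow syncs also force a roll
    private int slowSyncs;

    public SlowSyncRollTracker(long slowSyncMs, long rollOnSyncMs, int slowSyncRollCount) {
        this.slowSyncMs = slowSyncMs;
        this.rollOnSyncMs = rollOnSyncMs;
        this.slowSyncRollCount = slowSyncRollCount;
    }

    /** Returns true when a WAL roll should be requested after a sync of the given cost. */
    public boolean recordSync(long syncCostMs) {
        if (syncCostMs >= rollOnSyncMs) {
            return true;                                    // e.g. 5006 ms >= 5000 ms
        }
        if (syncCostMs >= slowSyncMs && ++slowSyncs >= slowSyncRollCount) {
            slowSyncs = 0;                                  // reset once a roll is requested
            return true;                                    // e.g. slow-sync count reaches the threshold
        }
        return false;
    }

    public static void main(String[] args) {
        SlowSyncRollTracker tracker = new SlowSyncRollTracker(100, 5000, 5);
        boolean roll = false;
        for (int i = 0; i < 5; i++) {
            roll = tracker.recordSync(201);                 // five slow-but-not-fatal syncs
        }
        System.out.println("roll requested: " + roll);      // true after the fifth slow sync
    }
}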
2024-11-11T06:59:25,855 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308354206 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308365645 2024-11-11T06:59:25,855 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45219:45219),(127.0.0.1/127.0.0.1:35447:35447)] 2024-11-11T06:59:25,856 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308354206 is not closed yet, will try archiving it next time 2024-11-11T06:59:25,856 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308334136 to hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/oldWALs/f334e8b0fbdf%2C42887%2C1731308321879.1731308334136 2024-11-11T06:59:25,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741839_1015 (size=7739) 2024-11-11T06:59:25,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741839_1015 (size=7739) 2024-11-11T06:59:27,846 INFO [FSHLog-0-hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5-prefix:f334e8b0fbdf,42887,1731308321879 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK], DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK]] 2024-11-11T06:59:29,445 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region b1c3e262b7fed51be42f4986d6865f56, had cached 0 bytes from a total of 25018 2024-11-11T06:59:30,051 INFO [FSHLog-0-hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5-prefix:f334e8b0fbdf,42887,1731308321879 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK], DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK]] 2024-11-11T06:59:32,255 INFO [FSHLog-0-hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5-prefix:f334e8b0fbdf,42887,1731308321879 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK], DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK]] 2024-11-11T06:59:34,459 INFO [FSHLog-0-hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5-prefix:f334e8b0fbdf,42887,1731308321879 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK], 
DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK]] 2024-11-11T06:59:36,461 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T06:59:36,462 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C42887%2C1731308321879.1731308376461 2024-11-11T06:59:40,302 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-11T06:59:41,472 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK], DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK]] 2024-11-11T06:59:41,473 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK], DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK]] 2024-11-11T06:59:41,474 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f334e8b0fbdf%2C42887%2C1731308321879:(num 1731308376461) roll requested 2024-11-11T06:59:41,474 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:41,474 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:41,474 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:41,474 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:41,474 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:41,475 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308365645 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308376461 2024-11-11T06:59:41,476 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45219:45219),(127.0.0.1/127.0.0.1:35447:35447)] 2024-11-11T06:59:41,476 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308365645 is not closed yet, will try archiving it next time 2024-11-11T06:59:41,476 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C42887%2C1731308321879.1731308381476 2024-11-11T06:59:41,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741841_1017 (size=4753) 2024-11-11T06:59:41,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741841_1017 (size=4753) 2024-11-11T06:59:46,483 INFO [FSHLog-0-hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5-prefix:f334e8b0fbdf,42887,1731308321879 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK], DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK]] 2024-11-11T06:59:46,483 WARN [FSHLog-0-hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5-prefix:f334e8b0fbdf,42887,1731308321879 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK], DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK]] 2024-11-11T06:59:46,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42887 {}] regionserver.HRegion(8855): Flush requested on b1c3e262b7fed51be42f4986d6865f56 2024-11-11T06:59:46,484 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b1c3e262b7fed51be42f4986d6865f56 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T06:59:46,488 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK], DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK]] 2024-11-11T06:59:46,489 WARN [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK], DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK]] 2024-11-11T06:59:48,485 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T06:59:51,486 INFO [FSHLog-0-hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5-prefix:f334e8b0fbdf,42887,1731308321879 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK], DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK]] 2024-11-11T06:59:51,486 WARN [FSHLog-0-hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5-prefix:f334e8b0fbdf,42887,1731308321879 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK], DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK]] 2024-11-11T06:59:51,486 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:51,486 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:51,487 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:51,487 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:51,487 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:51,487 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308376461 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308381476 2024-11-11T06:59:51,488 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35447:35447),(127.0.0.1/127.0.0.1:45219:45219)] 2024-11-11T06:59:51,488 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308376461 is not closed yet, will try archiving it next time 2024-11-11T06:59:51,489 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f334e8b0fbdf%2C42887%2C1731308321879:(num 1731308381476) roll requested 2024-11-11T06:59:51,489 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C42887%2C1731308321879.1731308391489 2024-11-11T06:59:51,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741842_1018 (size=1569) 2024-11-11T06:59:51,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741842_1018 (size=1569) 2024-11-11T06:59:51,493 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/.tmp/info/32de230224664673b9e3246e5c304bbe is 1080, key is row0015/info:/1731308363236/Put/seqid=0 2024-11-11T06:59:51,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741844_1020 (size=12509) 2024-11-11T06:59:51,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741844_1020 (size=12509) 2024-11-11T06:59:51,501 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/.tmp/info/32de230224664673b9e3246e5c304bbe 2024-11-11T06:59:51,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/.tmp/info/32de230224664673b9e3246e5c304bbe as hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/32de230224664673b9e3246e5c304bbe 2024-11-11T06:59:51,519 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/32de230224664673b9e3246e5c304bbe, entries=7, sequenceid=31, filesize=12.2 K 2024-11-11T06:59:56,498 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK], DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK]] 2024-11-11T06:59:56,498 WARN [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK], DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK]] 2024-11-11T06:59:56,521 INFO [FSHLog-0-hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5-prefix:f334e8b0fbdf,42887,1731308321879 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK], DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK]] 2024-11-11T06:59:56,521 WARN [FSHLog-0-hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5-prefix:f334e8b0fbdf,42887,1731308321879 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37861,DS-7e3723af-628a-46be-a32f-fe46cfe29692,DISK], DatanodeInfoWithStorage[127.0.0.1:33949,DS-4e03882f-56b5-44a5-bb5d-2a84d9dceef2,DISK]] 2024-11-11T06:59:56,521 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for b1c3e262b7fed51be42f4986d6865f56 in 10037ms, sequenceid=31, compaction requested=true 2024-11-11T06:59:56,521 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:56,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b1c3e262b7fed51be42f4986d6865f56: 2024-11-11T06:59:56,521 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:56,521 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:56,521 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-11T06:59:56,521 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:56,521 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T06:59:56,521 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/a2019e68b3b74057b3f26dc64b18f9d7 because midkey is the same as first or last row 2024-11-11T06:59:56,521 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:56,522 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308381476 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308391489 2024-11-11T06:59:56,522 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:45219:45219),(127.0.0.1/127.0.0.1:35447:35447)] 2024-11-11T06:59:56,522 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308381476 is not closed yet, will try archiving it next time 2024-11-11T06:59:56,523 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308354206 to hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/oldWALs/f334e8b0fbdf%2C42887%2C1731308321879.1731308354206 2024-11-11T06:59:56,523 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f334e8b0fbdf%2C42887%2C1731308321879:(num 1731308391489) roll requested 2024-11-11T06:59:56,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b1c3e262b7fed51be42f4986d6865f56:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T06:59:56,523 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C42887%2C1731308321879.1731308396523 2024-11-11T06:59:56,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741843_1019 (size=438) 2024-11-11T06:59:56,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741843_1019 (size=438) 2024-11-11T06:59:56,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T06:59:56,526 DEBUG [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T06:59:56,526 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308365645 to hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/oldWALs/f334e8b0fbdf%2C42887%2C1731308321879.1731308365645 2024-11-11T06:59:56,528 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308376461 to hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/oldWALs/f334e8b0fbdf%2C42887%2C1731308321879.1731308376461 2024-11-11T06:59:56,529 DEBUG [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T06:59:56,529 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308381476 to 
hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/oldWALs/f334e8b0fbdf%2C42887%2C1731308321879.1731308381476 2024-11-11T06:59:56,531 DEBUG [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] regionserver.HStore(1541): b1c3e262b7fed51be42f4986d6865f56/info is initiating minor compaction (all files) 2024-11-11T06:59:56,531 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:56,531 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:56,532 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:56,532 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:56,532 INFO [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b1c3e262b7fed51be42f4986d6865f56/info in TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56. 2024-11-11T06:59:56,532 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:56,532 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308391489 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308396523 2024-11-11T06:59:56,532 INFO [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/a2019e68b3b74057b3f26dc64b18f9d7, hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/5469dd55fd684b2ca12a6b870f5fbb01, hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/32de230224664673b9e3246e5c304bbe] into tmpdir=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/.tmp, totalSize=36.6 K 2024-11-11T06:59:56,534 DEBUG [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] compactions.Compactor(225): Compacting a2019e68b3b74057b3f26dc64b18f9d7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731308334160 2024-11-11T06:59:56,535 DEBUG [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5469dd55fd684b2ca12a6b870f5fbb01, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731308348195 2024-11-11T06:59:56,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741845_1021 (size=93) 2024-11-11T06:59:56,535 DEBUG [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] compactions.Compactor(225): Compacting 32de230224664673b9e3246e5c304bbe, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731308363236 2024-11-11T06:59:56,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741845_1021 (size=93) 2024-11-11T06:59:56,542 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35447:35447),(127.0.0.1/127.0.0.1:45219:45219)] 2024-11-11T06:59:56,542 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308391489 is not closed yet, will try archiving it next time 2024-11-11T06:59:56,543 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C42887%2C1731308321879.1731308396542 2024-11-11T06:59:56,552 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:56,552 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:56,552 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:56,552 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:56,552 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T06:59:56,553 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308396523 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308396542 2024-11-11T06:59:56,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741846_1022 (size=1258) 2024-11-11T06:59:56,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741846_1022 (size=1258) 2024-11-11T06:59:56,558 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308391489 is not closed yet, will try archiving it next time 2024-11-11T06:59:56,560 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35447:35447),(127.0.0.1/127.0.0.1:45219:45219)] 2024-11-11T06:59:56,560 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308391489 is not closed yet, will try archiving it next time 2024-11-11T06:59:56,570 INFO [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b1c3e262b7fed51be42f4986d6865f56#info#compaction#3 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T06:59:56,572 DEBUG [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/.tmp/info/46c6e218dd2e44bb893059b46223ac00 is 1080, key is row0001/info:/1731308334160/Put/seqid=0 2024-11-11T06:59:56,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741848_1024 (size=27710) 2024-11-11T06:59:56,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741848_1024 (size=27710) 2024-11-11T06:59:56,587 DEBUG [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/.tmp/info/46c6e218dd2e44bb893059b46223ac00 as hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/46c6e218dd2e44bb893059b46223ac00 2024-11-11T06:59:56,603 INFO [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b1c3e262b7fed51be42f4986d6865f56/info of b1c3e262b7fed51be42f4986d6865f56 into 46c6e218dd2e44bb893059b46223ac00(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T06:59:56,603 DEBUG [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b1c3e262b7fed51be42f4986d6865f56: 2024-11-11T06:59:56,605 INFO [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56., storeName=b1c3e262b7fed51be42f4986d6865f56/info, priority=13, startTime=1731308396523; duration=0sec 2024-11-11T06:59:56,605 DEBUG [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-11T06:59:56,605 DEBUG [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T06:59:56,605 DEBUG [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/46c6e218dd2e44bb893059b46223ac00 because midkey is the same as first or last row 2024-11-11T06:59:56,606 DEBUG [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-11T06:59:56,606 DEBUG [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T06:59:56,606 DEBUG [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/46c6e218dd2e44bb893059b46223ac00 because midkey is the same as first or last row 2024-11-11T06:59:56,606 DEBUG [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-11T06:59:56,606 DEBUG [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T06:59:56,606 DEBUG [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/46c6e218dd2e44bb893059b46223ac00 because midkey is the same as first or last row 2024-11-11T06:59:56,606 DEBUG [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T06:59:56,606 DEBUG [RS:0;f334e8b0fbdf:42887-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b1c3e262b7fed51be42f4986d6865f56:info 2024-11-11T06:59:56,936 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/WALs/f334e8b0fbdf,42887,1731308321879/f334e8b0fbdf%2C42887%2C1731308321879.1731308391489 to hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/oldWALs/f334e8b0fbdf%2C42887%2C1731308321879.1731308391489 2024-11-11T07:00:08,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42887 {}] regionserver.HRegion(8855): Flush requested on b1c3e262b7fed51be42f4986d6865f56 2024-11-11T07:00:08,567 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b1c3e262b7fed51be42f4986d6865f56 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T07:00:08,574 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/.tmp/info/0240fc77ca3644bb915e1be37fc14b50 is 1080, key is row0022/info:/1731308396544/Put/seqid=0 2024-11-11T07:00:08,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741849_1025 (size=12509) 2024-11-11T07:00:08,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741849_1025 (size=12509) 2024-11-11T07:00:08,581 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/.tmp/info/0240fc77ca3644bb915e1be37fc14b50 2024-11-11T07:00:08,591 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/.tmp/info/0240fc77ca3644bb915e1be37fc14b50 as hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/0240fc77ca3644bb915e1be37fc14b50 2024-11-11T07:00:08,599 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/0240fc77ca3644bb915e1be37fc14b50, entries=7, sequenceid=42, filesize=12.2 K 2024-11-11T07:00:08,600 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for b1c3e262b7fed51be42f4986d6865f56 in 33ms, sequenceid=42, compaction requested=false 2024-11-11T07:00:08,600 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b1c3e262b7fed51be42f4986d6865f56: 2024-11-11T07:00:08,601 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-11T07:00:08,601 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T07:00:08,601 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/46c6e218dd2e44bb893059b46223ac00 because midkey is the same as first or last row 2024-11-11T07:00:10,302 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-11T07:00:14,446 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region b1c3e262b7fed51be42f4986d6865f56, had cached 0 bytes from a total of 40219 2024-11-11T07:00:16,578 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T07:00:16,578 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
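The repeated split checks above reduce to a pair of rules: the region becomes a split candidate once the store's total file size passes the size to check (sumSize values of 24.4 K and upward against sizeToCheck=16.0 K), but the split is abandoned when the candidate midkey equals the first or last row of the file. A small illustrative sketch of that pair, not the actual HBase split-policy classes:

import java.util.Arrays;

public class SplitCheckSketch {
    // Size trigger, as in "Should split because region size is big enough".
    static boolean bigEnough(long sumSizeBytes, long sizeToCheckBytes) {
        return sumSizeBytes > sizeToCheckBytes;
    }

    // Midkey rule, as in "cannot split ... because midkey is the same as first or last row".
    static boolean splittable(byte[] firstRow, byte[] midKey, byte[] lastRow) {
        return !Arrays.equals(midKey, firstRow) && !Arrays.equals(midKey, lastRow);
    }

    public static void main(String[] args) {
        long sumSize = 25_018, sizeToCheck = 16 * 1024;
        byte[] first = "row0001".getBytes(), mid = "row0001".getBytes(), last = "row0015".getBytes();
        boolean shouldSplit = bigEnough(sumSize, sizeToCheck) && splittable(first, mid, last);
        System.out.println("split: " + shouldSplit);    // false: the midkey equals the first row
    }
}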
2024-11-11T07:00:16,579 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T07:00:16,583 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:00:16,584 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:00:16,584 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
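The tear-down call stack above runs from AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) into HBaseTestingUtil.shutdownMiniCluster. A hedged reconstruction of what such a tearDown plausibly looks like, grounded only in the frames shown; the TEST_UTIL field name and the @After wiring are assumptions:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class TearDownSketch {
    // A shared testing-utility instance of the kind the logged test appears to use.
    private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

    @After
    public void tearDown() throws Exception {
        // Stops the mini HBase cluster (master and region server) and closes the
        // async cluster connection whose close() stack is logged above.
        TEST_UTIL.shutdownMiniCluster();
    }
}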
2024-11-11T07:00:16,584 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T07:00:16,584 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1590533472, stopped=false 2024-11-11T07:00:16,585 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=f334e8b0fbdf,39009,1731308321197 2024-11-11T07:00:16,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T07:00:16,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42887-0x10105d260fd0001, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T07:00:16,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:16,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42887-0x10105d260fd0001, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:16,587 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T07:00:16,587 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T07:00:16,587 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T07:00:16,588 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:00:16,588 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42887-0x10105d260fd0001, quorum=127.0.0.1:58137, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:00:16,588 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'f334e8b0fbdf,42887,1731308321879' ***** 2024-11-11T07:00:16,588 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:00:16,588 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T07:00:16,588 INFO [RS:0;f334e8b0fbdf:42887 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T07:00:16,589 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T07:00:16,589 INFO [RS:0;f334e8b0fbdf:42887 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T07:00:16,589 INFO [RS:0;f334e8b0fbdf:42887 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T07:00:16,589 INFO [RS:0;f334e8b0fbdf:42887 {}] regionserver.HRegionServer(3091): Received CLOSE for b1c3e262b7fed51be42f4986d6865f56 2024-11-11T07:00:16,590 INFO [RS:0;f334e8b0fbdf:42887 {}] regionserver.HRegionServer(959): stopping server f334e8b0fbdf,42887,1731308321879 2024-11-11T07:00:16,590 INFO [RS:0;f334e8b0fbdf:42887 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T07:00:16,590 INFO [RS:0;f334e8b0fbdf:42887 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;f334e8b0fbdf:42887. 
2024-11-11T07:00:16,590 DEBUG [RS:0;f334e8b0fbdf:42887 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T07:00:16,590 DEBUG [RS:0;f334e8b0fbdf:42887 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:00:16,590 INFO [RS:0;f334e8b0fbdf:42887 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T07:00:16,590 INFO [RS:0;f334e8b0fbdf:42887 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T07:00:16,590 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b1c3e262b7fed51be42f4986d6865f56, disabling compactions & flushes 2024-11-11T07:00:16,590 INFO [RS:0;f334e8b0fbdf:42887 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T07:00:16,590 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56. 2024-11-11T07:00:16,590 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56. 2024-11-11T07:00:16,590 INFO [RS:0;f334e8b0fbdf:42887 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T07:00:16,590 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56. after waiting 0 ms 2024-11-11T07:00:16,590 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56. 
2024-11-11T07:00:16,590 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing b1c3e262b7fed51be42f4986d6865f56 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-11T07:00:16,591 INFO [RS:0;f334e8b0fbdf:42887 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-11T07:00:16,591 DEBUG [RS:0;f334e8b0fbdf:42887 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, b1c3e262b7fed51be42f4986d6865f56=TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56.} 2024-11-11T07:00:16,591 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T07:00:16,591 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T07:00:16,591 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T07:00:16,591 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T07:00:16,591 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T07:00:16,591 DEBUG [RS:0;f334e8b0fbdf:42887 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, b1c3e262b7fed51be42f4986d6865f56 2024-11-11T07:00:16,591 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-11T07:00:16,597 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/.tmp/info/5da160520d284a4db66b3a72af83b320 is 1080, key is row0029/info:/1731308410569/Put/seqid=0 2024-11-11T07:00:16,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741850_1026 (size=8193) 2024-11-11T07:00:16,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741850_1026 (size=8193) 2024-11-11T07:00:16,604 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/.tmp/info/5da160520d284a4db66b3a72af83b320 2024-11-11T07:00:16,615 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/.tmp/info/5da160520d284a4db66b3a72af83b320 as 
hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/5da160520d284a4db66b3a72af83b320 2024-11-11T07:00:16,615 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/hbase/meta/1588230740/.tmp/info/18af7a90db8b4bcc82b83fd53f263b6f is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56./info:regioninfo/1731308324468/Put/seqid=0 2024-11-11T07:00:16,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741851_1027 (size=7016) 2024-11-11T07:00:16,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741851_1027 (size=7016) 2024-11-11T07:00:16,623 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/hbase/meta/1588230740/.tmp/info/18af7a90db8b4bcc82b83fd53f263b6f 2024-11-11T07:00:16,624 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/5da160520d284a4db66b3a72af83b320, entries=3, sequenceid=48, filesize=8.0 K 2024-11-11T07:00:16,625 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for b1c3e262b7fed51be42f4986d6865f56 in 35ms, sequenceid=48, compaction requested=true 2024-11-11T07:00:16,626 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/a2019e68b3b74057b3f26dc64b18f9d7, hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/5469dd55fd684b2ca12a6b870f5fbb01, hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/32de230224664673b9e3246e5c304bbe] to archive 2024-11-11T07:00:16,629 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-11T07:00:16,633 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/a2019e68b3b74057b3f26dc64b18f9d7 to hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/archive/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/a2019e68b3b74057b3f26dc64b18f9d7 2024-11-11T07:00:16,635 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/5469dd55fd684b2ca12a6b870f5fbb01 to hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/archive/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/5469dd55fd684b2ca12a6b870f5fbb01 2024-11-11T07:00:16,637 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/32de230224664673b9e3246e5c304bbe to hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/archive/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/info/32de230224664673b9e3246e5c304bbe 2024-11-11T07:00:16,649 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=f334e8b0fbdf:39009 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-11T07:00:16,654 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [a2019e68b3b74057b3f26dc64b18f9d7=12509, 5469dd55fd684b2ca12a6b870f5fbb01=12509, 32de230224664673b9e3246e5c304bbe=12509] 2024-11-11T07:00:16,655 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/hbase/meta/1588230740/.tmp/ns/b56236731563484bbbb80a30ed6a7720 is 43, key is default/ns:d/1731308323735/Put/seqid=0 2024-11-11T07:00:16,660 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/default/TestLogRolling-testSlowSyncLogRolling/b1c3e262b7fed51be42f4986d6865f56/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-11T07:00:16,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741852_1028 (size=5153) 2024-11-11T07:00:16,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741852_1028 (size=5153) 2024-11-11T07:00:16,663 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56. 2024-11-11T07:00:16,663 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b1c3e262b7fed51be42f4986d6865f56: Waiting for close lock at 1731308416590Running coprocessor pre-close hooks at 1731308416590Disabling compacts and flushes for region at 1731308416590Disabling writes for close at 1731308416590Obtaining lock to block concurrent updates at 1731308416591 (+1 ms)Preparing flush snapshotting stores in b1c3e262b7fed51be42f4986d6865f56 at 1731308416591Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731308416591Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56. 
at 1731308416592 (+1 ms)Flushing b1c3e262b7fed51be42f4986d6865f56/info: creating writer at 1731308416592Flushing b1c3e262b7fed51be42f4986d6865f56/info: appending metadata at 1731308416596 (+4 ms)Flushing b1c3e262b7fed51be42f4986d6865f56/info: closing flushed file at 1731308416596Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c4363d5: reopening flushed file at 1731308416614 (+18 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for b1c3e262b7fed51be42f4986d6865f56 in 35ms, sequenceid=48, compaction requested=true at 1731308416626 (+12 ms)Writing region close event to WAL at 1731308416654 (+28 ms)Running coprocessor post-close hooks at 1731308416661 (+7 ms)Closed at 1731308416663 (+2 ms) 2024-11-11T07:00:16,663 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/hbase/meta/1588230740/.tmp/ns/b56236731563484bbbb80a30ed6a7720 2024-11-11T07:00:16,663 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731308324019.b1c3e262b7fed51be42f4986d6865f56. 2024-11-11T07:00:16,687 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/hbase/meta/1588230740/.tmp/table/9c16d30e99be4bbe808c7c8d89601c64 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731308324484/Put/seqid=0 2024-11-11T07:00:16,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741853_1029 (size=5396) 2024-11-11T07:00:16,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741853_1029 (size=5396) 2024-11-11T07:00:16,694 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/hbase/meta/1588230740/.tmp/table/9c16d30e99be4bbe808c7c8d89601c64 2024-11-11T07:00:16,702 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/hbase/meta/1588230740/.tmp/info/18af7a90db8b4bcc82b83fd53f263b6f as hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/hbase/meta/1588230740/info/18af7a90db8b4bcc82b83fd53f263b6f 2024-11-11T07:00:16,709 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/hbase/meta/1588230740/info/18af7a90db8b4bcc82b83fd53f263b6f, entries=10, sequenceid=11, filesize=6.9 K 2024-11-11T07:00:16,710 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/hbase/meta/1588230740/.tmp/ns/b56236731563484bbbb80a30ed6a7720 as hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/hbase/meta/1588230740/ns/b56236731563484bbbb80a30ed6a7720 2024-11-11T07:00:16,718 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/hbase/meta/1588230740/ns/b56236731563484bbbb80a30ed6a7720, entries=2, sequenceid=11, filesize=5.0 K 2024-11-11T07:00:16,719 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/hbase/meta/1588230740/.tmp/table/9c16d30e99be4bbe808c7c8d89601c64 as hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/hbase/meta/1588230740/table/9c16d30e99be4bbe808c7c8d89601c64 2024-11-11T07:00:16,726 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/hbase/meta/1588230740/table/9c16d30e99be4bbe808c7c8d89601c64, entries=2, sequenceid=11, filesize=5.3 K 2024-11-11T07:00:16,728 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 137ms, sequenceid=11, compaction requested=false 2024-11-11T07:00:16,734 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-11T07:00:16,735 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T07:00:16,735 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T07:00:16,735 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731308416591Running coprocessor pre-close hooks at 1731308416591Disabling compacts and flushes for region at 1731308416591Disabling writes for close at 1731308416591Obtaining lock to block concurrent updates at 1731308416591Preparing flush snapshotting stores in 1588230740 at 1731308416591Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731308416591Flushing stores of hbase:meta,,1.1588230740 at 1731308416592 (+1 ms)Flushing 1588230740/info: creating writer at 1731308416593 (+1 ms)Flushing 1588230740/info: appending metadata at 1731308416615 (+22 ms)Flushing 1588230740/info: closing flushed file at 1731308416615Flushing 1588230740/ns: creating writer at 1731308416631 (+16 ms)Flushing 1588230740/ns: appending metadata at 1731308416654 (+23 ms)Flushing 1588230740/ns: closing flushed file at 1731308416654Flushing 
1588230740/table: creating writer at 1731308416671 (+17 ms)Flushing 1588230740/table: appending metadata at 1731308416687 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731308416687Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ed88a51: reopening flushed file at 1731308416701 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@756717cc: reopening flushed file at 1731308416710 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2243b5d0: reopening flushed file at 1731308416718 (+8 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 137ms, sequenceid=11, compaction requested=false at 1731308416728 (+10 ms)Writing region close event to WAL at 1731308416729 (+1 ms)Running coprocessor post-close hooks at 1731308416734 (+5 ms)Closed at 1731308416735 (+1 ms) 2024-11-11T07:00:16,735 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T07:00:16,791 INFO [RS:0;f334e8b0fbdf:42887 {}] regionserver.HRegionServer(976): stopping server f334e8b0fbdf,42887,1731308321879; all regions closed. 2024-11-11T07:00:16,793 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:16,793 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:16,793 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:16,794 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:16,794 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:16,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741834_1010 (size=3066) 2024-11-11T07:00:16,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741834_1010 (size=3066) 2024-11-11T07:00:16,800 DEBUG [RS:0;f334e8b0fbdf:42887 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/oldWALs 2024-11-11T07:00:16,800 INFO [RS:0;f334e8b0fbdf:42887 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f334e8b0fbdf%2C42887%2C1731308321879.meta:.meta(num 1731308323587) 2024-11-11T07:00:16,801 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:16,801 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:16,801 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:16,801 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:16,801 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:16,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741847_1023 (size=12695) 2024-11-11T07:00:16,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741847_1023 (size=12695) 2024-11-11T07:00:16,807 DEBUG [RS:0;f334e8b0fbdf:42887 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/oldWALs 2024-11-11T07:00:16,807 INFO [RS:0;f334e8b0fbdf:42887 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f334e8b0fbdf%2C42887%2C1731308321879:(num 1731308396542) 2024-11-11T07:00:16,808 DEBUG [RS:0;f334e8b0fbdf:42887 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-11-11T07:00:16,808 INFO [RS:0;f334e8b0fbdf:42887 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T07:00:16,808 INFO [RS:0;f334e8b0fbdf:42887 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T07:00:16,808 INFO [RS:0;f334e8b0fbdf:42887 {}] hbase.ChoreService(370): Chore service for: regionserver/f334e8b0fbdf:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T07:00:16,808 INFO [RS:0;f334e8b0fbdf:42887 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T07:00:16,808 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T07:00:16,809 INFO [RS:0;f334e8b0fbdf:42887 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42887 2024-11-11T07:00:16,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42887-0x10105d260fd0001, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f334e8b0fbdf,42887,1731308321879 2024-11-11T07:00:16,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T07:00:16,813 INFO [RS:0;f334e8b0fbdf:42887 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T07:00:16,814 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f334e8b0fbdf,42887,1731308321879] 2024-11-11T07:00:16,817 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f334e8b0fbdf,42887,1731308321879 already deleted, retry=false 2024-11-11T07:00:16,818 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f334e8b0fbdf,42887,1731308321879 expired; onlineServers=0 2024-11-11T07:00:16,818 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'f334e8b0fbdf,39009,1731308321197' ***** 2024-11-11T07:00:16,818 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T07:00:16,818 INFO [M:0;f334e8b0fbdf:39009 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T07:00:16,818 INFO [M:0;f334e8b0fbdf:39009 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T07:00:16,818 DEBUG [M:0;f334e8b0fbdf:39009 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T07:00:16,818 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-11T07:00:16,818 DEBUG [M:0;f334e8b0fbdf:39009 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T07:00:16,818 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.large.0-1731308322841 {}] cleaner.HFileCleaner(306): Exit Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.large.0-1731308322841,5,FailOnTimeoutGroup] 2024-11-11T07:00:16,818 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.small.0-1731308322842 {}] cleaner.HFileCleaner(306): Exit Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.small.0-1731308322842,5,FailOnTimeoutGroup] 2024-11-11T07:00:16,819 INFO [M:0;f334e8b0fbdf:39009 {}] hbase.ChoreService(370): Chore service for: master/f334e8b0fbdf:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T07:00:16,819 INFO [M:0;f334e8b0fbdf:39009 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T07:00:16,819 DEBUG [M:0;f334e8b0fbdf:39009 {}] master.HMaster(1795): Stopping service threads 2024-11-11T07:00:16,819 INFO [M:0;f334e8b0fbdf:39009 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T07:00:16,819 INFO [M:0;f334e8b0fbdf:39009 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T07:00:16,820 INFO [M:0;f334e8b0fbdf:39009 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T07:00:16,820 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T07:00:16,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T07:00:16,821 DEBUG [M:0;f334e8b0fbdf:39009 {}] zookeeper.ZKUtil(347): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T07:00:16,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:16,821 WARN [M:0;f334e8b0fbdf:39009 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T07:00:16,822 INFO [M:0;f334e8b0fbdf:39009 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/.lastflushedseqids 2024-11-11T07:00:16,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741854_1030 (size=130) 2024-11-11T07:00:16,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741854_1030 (size=130) 2024-11-11T07:00:16,835 INFO [M:0;f334e8b0fbdf:39009 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T07:00:16,835 INFO [M:0;f334e8b0fbdf:39009 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T07:00:16,835 DEBUG [M:0;f334e8b0fbdf:39009 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T07:00:16,835 INFO [M:0;f334e8b0fbdf:39009 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:00:16,835 DEBUG [M:0;f334e8b0fbdf:39009 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:00:16,835 DEBUG [M:0;f334e8b0fbdf:39009 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T07:00:16,835 DEBUG [M:0;f334e8b0fbdf:39009 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:00:16,836 INFO [M:0;f334e8b0fbdf:39009 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-11T07:00:16,854 DEBUG [M:0;f334e8b0fbdf:39009 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/923bb993e2f34017b9071bf03044a3f1 is 82, key is hbase:meta,,1/info:regioninfo/1731308323658/Put/seqid=0 2024-11-11T07:00:16,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741855_1031 (size=5672) 2024-11-11T07:00:16,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741855_1031 (size=5672) 2024-11-11T07:00:16,861 INFO [M:0;f334e8b0fbdf:39009 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/923bb993e2f34017b9071bf03044a3f1 2024-11-11T07:00:16,885 DEBUG [M:0;f334e8b0fbdf:39009 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d8df4837caf147bd8c2b2be5d5ead9f8 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731308324490/Put/seqid=0 2024-11-11T07:00:16,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741856_1032 (size=6247) 2024-11-11T07:00:16,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741856_1032 (size=6247) 2024-11-11T07:00:16,892 INFO [M:0;f334e8b0fbdf:39009 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d8df4837caf147bd8c2b2be5d5ead9f8 2024-11-11T07:00:16,899 INFO [M:0;f334e8b0fbdf:39009 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d8df4837caf147bd8c2b2be5d5ead9f8 2024-11-11T07:00:16,915 DEBUG [M:0;f334e8b0fbdf:39009 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/14ea86e54f9e471caecb9e6bfaab36c3 is 69, key is f334e8b0fbdf,42887,1731308321879/rs:state/1731308322922/Put/seqid=0 2024-11-11T07:00:16,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42887-0x10105d260fd0001, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:00:16,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42887-0x10105d260fd0001, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:00:16,916 INFO [RS:0;f334e8b0fbdf:42887 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T07:00:16,916 INFO [RS:0;f334e8b0fbdf:42887 {}] regionserver.HRegionServer(1031): Exiting; stopping=f334e8b0fbdf,42887,1731308321879; zookeeper connection closed. 2024-11-11T07:00:16,916 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6981d0ca {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6981d0ca 2024-11-11T07:00:16,917 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-11T07:00:16,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741857_1033 (size=5156) 2024-11-11T07:00:16,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741857_1033 (size=5156) 2024-11-11T07:00:16,922 INFO [M:0;f334e8b0fbdf:39009 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/14ea86e54f9e471caecb9e6bfaab36c3 2024-11-11T07:00:16,944 DEBUG [M:0;f334e8b0fbdf:39009 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ece15a4c0abc477799ba869162dd7ee3 is 52, key is load_balancer_on/state:d/1731308323993/Put/seqid=0 2024-11-11T07:00:16,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741858_1034 (size=5056) 2024-11-11T07:00:16,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741858_1034 (size=5056) 2024-11-11T07:00:16,952 INFO [M:0;f334e8b0fbdf:39009 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ece15a4c0abc477799ba869162dd7ee3 2024-11-11T07:00:16,960 DEBUG [M:0;f334e8b0fbdf:39009 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/923bb993e2f34017b9071bf03044a3f1 as 
hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/923bb993e2f34017b9071bf03044a3f1 2024-11-11T07:00:16,966 INFO [M:0;f334e8b0fbdf:39009 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/923bb993e2f34017b9071bf03044a3f1, entries=8, sequenceid=59, filesize=5.5 K 2024-11-11T07:00:16,968 DEBUG [M:0;f334e8b0fbdf:39009 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d8df4837caf147bd8c2b2be5d5ead9f8 as hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d8df4837caf147bd8c2b2be5d5ead9f8 2024-11-11T07:00:16,974 INFO [M:0;f334e8b0fbdf:39009 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d8df4837caf147bd8c2b2be5d5ead9f8 2024-11-11T07:00:16,974 INFO [M:0;f334e8b0fbdf:39009 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d8df4837caf147bd8c2b2be5d5ead9f8, entries=6, sequenceid=59, filesize=6.1 K 2024-11-11T07:00:16,975 DEBUG [M:0;f334e8b0fbdf:39009 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/14ea86e54f9e471caecb9e6bfaab36c3 as hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/14ea86e54f9e471caecb9e6bfaab36c3 2024-11-11T07:00:16,982 INFO [M:0;f334e8b0fbdf:39009 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/14ea86e54f9e471caecb9e6bfaab36c3, entries=1, sequenceid=59, filesize=5.0 K 2024-11-11T07:00:16,984 DEBUG [M:0;f334e8b0fbdf:39009 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ece15a4c0abc477799ba869162dd7ee3 as hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ece15a4c0abc477799ba869162dd7ee3 2024-11-11T07:00:16,991 INFO [M:0;f334e8b0fbdf:39009 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ece15a4c0abc477799ba869162dd7ee3, entries=1, sequenceid=59, filesize=4.9 K 2024-11-11T07:00:16,993 INFO [M:0;f334e8b0fbdf:39009 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 158ms, sequenceid=59, compaction requested=false 2024-11-11T07:00:16,995 INFO [M:0;f334e8b0fbdf:39009 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-11T07:00:16,995 DEBUG [M:0;f334e8b0fbdf:39009 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731308416835Disabling compacts and flushes for region at 1731308416835Disabling writes for close at 1731308416835Obtaining lock to block concurrent updates at 1731308416836 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731308416836Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1731308416837 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731308416837Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731308416837Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731308416854 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731308416854Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731308416868 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731308416885 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731308416885Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731308416899 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731308416914 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731308416914Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731308416928 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731308416943 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731308416943Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@30e4ba36: reopening flushed file at 1731308416959 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@64df473e: reopening flushed file at 1731308416967 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@36e7c983: reopening flushed file at 1731308416975 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@385723d2: reopening flushed file at 1731308416983 (+8 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 158ms, sequenceid=59, compaction requested=false at 1731308416993 (+10 ms)Writing region close event to WAL at 1731308416995 (+2 ms)Closed at 1731308416995 2024-11-11T07:00:16,996 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:16,996 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:16,996 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:16,997 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:16,997 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:16,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33949 is added to blk_1073741830_1006 (size=27973) 2024-11-11T07:00:16,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37861 is added to blk_1073741830_1006 (size=27973) 2024-11-11T07:00:17,000 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T07:00:17,000 INFO [M:0;f334e8b0fbdf:39009 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-11T07:00:17,001 INFO [M:0;f334e8b0fbdf:39009 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39009 2024-11-11T07:00:17,001 INFO [M:0;f334e8b0fbdf:39009 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T07:00:17,009 INFO [regionserver/f334e8b0fbdf:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T07:00:17,103 INFO [M:0;f334e8b0fbdf:39009 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T07:00:17,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:00:17,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39009-0x10105d260fd0000, quorum=127.0.0.1:58137, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:00:17,108 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:00:17,110 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:00:17,110 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:00:17,111 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:00:17,111 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/hadoop.log.dir/,STOPPED} 2024-11-11T07:00:17,114 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T07:00:17,114 WARN [BP-1500220386-172.17.0.2-1731308318312 heartbeating to localhost/127.0.0.1:45731 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T07:00:17,114 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T07:00:17,114 WARN [BP-1500220386-172.17.0.2-1731308318312 heartbeating to localhost/127.0.0.1:45731 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1500220386-172.17.0.2-1731308318312 (Datanode Uuid bd568af2-f574-4267-a040-ae0e1b3e2746) service to localhost/127.0.0.1:45731 2024-11-11T07:00:17,116 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/cluster_b61b4310-de07-4989-6b6d-aefb5d03a426/data/data3/current/BP-1500220386-172.17.0.2-1731308318312 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:00:17,116 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/cluster_b61b4310-de07-4989-6b6d-aefb5d03a426/data/data4/current/BP-1500220386-172.17.0.2-1731308318312 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:00:17,116 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T07:00:17,119 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:00:17,119 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:00:17,119 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:00:17,120 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:00:17,120 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/hadoop.log.dir/,STOPPED} 2024-11-11T07:00:17,122 WARN [BP-1500220386-172.17.0.2-1731308318312 heartbeating to localhost/127.0.0.1:45731 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T07:00:17,122 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T07:00:17,122 WARN [BP-1500220386-172.17.0.2-1731308318312 heartbeating to localhost/127.0.0.1:45731 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1500220386-172.17.0.2-1731308318312 (Datanode Uuid 818f538e-f155-43f3-aa2e-cc37f287c309) service to localhost/127.0.0.1:45731 2024-11-11T07:00:17,122 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T07:00:17,123 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/cluster_b61b4310-de07-4989-6b6d-aefb5d03a426/data/data1/current/BP-1500220386-172.17.0.2-1731308318312 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:00:17,123 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/cluster_b61b4310-de07-4989-6b6d-aefb5d03a426/data/data2/current/BP-1500220386-172.17.0.2-1731308318312 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:00:17,123 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T07:00:17,136 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T07:00:17,137 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:00:17,137 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:00:17,138 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:00:17,138 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/hadoop.log.dir/,STOPPED} 2024-11-11T07:00:17,148 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T07:00:17,179 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T07:00:17,189 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45731 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45731 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-1 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45731 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45731 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45731 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:45731 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: master/f334e8b0fbdf:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/f334e8b0fbdf:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/f334e8b0fbdf:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45731 from jenkins 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45731 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging 
thread: org.apache.hadoop.hdfs.PeerCache@4d9953b6 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=50 (was 10) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=9550 (was 10117) 2024-11-11T07:00:17,196 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=50, ProcessCount=11, AvailableMemoryMB=9549 2024-11-11T07:00:17,197 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T07:00:17,197 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/hadoop.log.dir so I do NOT create it in target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c 2024-11-11T07:00:17,197 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae032100-1bd2-b97e-df1e-bc9dad623ec3/hadoop.tmp.dir so I do NOT create it in target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c 2024-11-11T07:00:17,197 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/cluster_f8e9c5d0-28b2-d6b7-6f76-2c451ebf4b01, deleteOnExit=true 2024-11-11T07:00:17,197 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T07:00:17,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/test.cache.data in system properties and HBase conf 2024-11-11T07:00:17,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T07:00:17,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/hadoop.log.dir in system properties and HBase conf 2024-11-11T07:00:17,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T07:00:17,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T07:00:17,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T07:00:17,198 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-11T07:00:17,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T07:00:17,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T07:00:17,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T07:00:17,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T07:00:17,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T07:00:17,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T07:00:17,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T07:00:17,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T07:00:17,199 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T07:00:17,200 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/nfs.dump.dir in system properties and HBase conf 2024-11-11T07:00:17,200 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/java.io.tmpdir in system properties and HBase conf 2024-11-11T07:00:17,200 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T07:00:17,200 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T07:00:17,200 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T07:00:17,215 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T07:00:17,299 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:00:17,305 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T07:00:17,308 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T07:00:17,308 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T07:00:17,308 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T07:00:17,309 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:00:17,310 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3197ca45{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/hadoop.log.dir/,AVAILABLE} 2024-11-11T07:00:17,310 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45e3157d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T07:00:17,426 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@511dc70f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/java.io.tmpdir/jetty-localhost-38397-hadoop-hdfs-3_4_1-tests_jar-_-any-15385743675787237226/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T07:00:17,427 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3e469283{HTTP/1.1, (http/1.1)}{localhost:38397} 2024-11-11T07:00:17,427 INFO [Time-limited test {}] server.Server(415): Started @101006ms 2024-11-11T07:00:17,442 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T07:00:17,518 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:00:17,522 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T07:00:17,525 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T07:00:17,525 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T07:00:17,525 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T07:00:17,525 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4edee9ab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/hadoop.log.dir/,AVAILABLE} 2024-11-11T07:00:17,526 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@276f8783{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T07:00:17,641 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5d4bdc00{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/java.io.tmpdir/jetty-localhost-41533-hadoop-hdfs-3_4_1-tests_jar-_-any-14647399756044147752/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:00:17,641 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@687b21ce{HTTP/1.1, (http/1.1)}{localhost:41533} 2024-11-11T07:00:17,641 INFO [Time-limited test {}] server.Server(415): Started @101221ms 2024-11-11T07:00:17,643 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T07:00:17,681 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:00:17,685 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T07:00:17,686 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T07:00:17,686 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T07:00:17,686 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T07:00:17,686 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b44e274{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/hadoop.log.dir/,AVAILABLE} 2024-11-11T07:00:17,687 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@376d199b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T07:00:17,745 WARN [Thread-436 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/cluster_f8e9c5d0-28b2-d6b7-6f76-2c451ebf4b01/data/data1/current/BP-1769345365-172.17.0.2-1731308417235/current, will proceed with Du for space computation calculation, 2024-11-11T07:00:17,745 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/cluster_f8e9c5d0-28b2-d6b7-6f76-2c451ebf4b01/data/data2/current/BP-1769345365-172.17.0.2-1731308417235/current, will proceed with Du for space computation calculation, 2024-11-11T07:00:17,769 WARN [Thread-415 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T07:00:17,772 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdea9c46f0b4a6e15 with lease ID 0xbebf83a5cff65dfd: Processing first storage report for DS-1b43c8a7-4d43-4b4b-90c8-b6e7e964ef66 from datanode DatanodeRegistration(127.0.0.1:43347, datanodeUuid=2b62e074-7167-4a12-be57-b6df95df27e4, infoPort=33709, infoSecurePort=0, ipcPort=44723, storageInfo=lv=-57;cid=testClusterID;nsid=597623059;c=1731308417235) 2024-11-11T07:00:17,772 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdea9c46f0b4a6e15 with lease ID 0xbebf83a5cff65dfd: from storage DS-1b43c8a7-4d43-4b4b-90c8-b6e7e964ef66 node DatanodeRegistration(127.0.0.1:43347, datanodeUuid=2b62e074-7167-4a12-be57-b6df95df27e4, infoPort=33709, infoSecurePort=0, ipcPort=44723, storageInfo=lv=-57;cid=testClusterID;nsid=597623059;c=1731308417235), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:00:17,773 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdea9c46f0b4a6e15 with lease ID 0xbebf83a5cff65dfd: Processing first storage report for DS-9146f0b6-1c89-4e46-aada-9e775ceb603a from datanode DatanodeRegistration(127.0.0.1:43347, datanodeUuid=2b62e074-7167-4a12-be57-b6df95df27e4, infoPort=33709, infoSecurePort=0, ipcPort=44723, storageInfo=lv=-57;cid=testClusterID;nsid=597623059;c=1731308417235) 2024-11-11T07:00:17,773 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdea9c46f0b4a6e15 with lease ID 0xbebf83a5cff65dfd: from storage DS-9146f0b6-1c89-4e46-aada-9e775ceb603a node DatanodeRegistration(127.0.0.1:43347, datanodeUuid=2b62e074-7167-4a12-be57-b6df95df27e4, infoPort=33709, infoSecurePort=0, ipcPort=44723, storageInfo=lv=-57;cid=testClusterID;nsid=597623059;c=1731308417235), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-11T07:00:17,810 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@824b6ae{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/java.io.tmpdir/jetty-localhost-33237-hadoop-hdfs-3_4_1-tests_jar-_-any-3592101487599046339/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:00:17,811 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5d7e5c06{HTTP/1.1, (http/1.1)}{localhost:33237} 2024-11-11T07:00:17,811 INFO [Time-limited test {}] server.Server(415): Started @101390ms 2024-11-11T07:00:17,813 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
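The entries above show the test harness bringing up an in-process two-datanode MiniDFSCluster plus the embedded Jetty servers for the NameNode and DataNode web UIs. As a point of reference, a minimal sketch of the kind of JUnit setup that typically produces this output is shown below; the test class and method names are hypothetical (the actual test is not identifiable from this excerpt), and the API shown is the HBase 3.x test utility as commonly used, not a verbatim reproduction of the test that ran here.

```java
// Hypothetical JUnit 4 skeleton; the real test behind this log is not identifiable
// from the excerpt. HBaseTestingUtil is the HBase 3.x (branch-3) test utility;
// startMiniCluster() brings up in-process HDFS + ZooKeeper + HBase and emits
// log lines like the ones above (per-test data dirs, datanode web servers, ...).
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class ExampleMiniClusterTest {

  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    // Writes the per-test directories (hadoop.log.dir, dfs.*, java.io.tmpdir, ...)
    // into system properties and the HBase conf, then starts the mini cluster.
    TEST_UTIL.startMiniCluster();
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void testSomething() throws Exception {
    // test body elided
  }
}
```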
2024-11-11T07:00:17,915 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/cluster_f8e9c5d0-28b2-d6b7-6f76-2c451ebf4b01/data/data4/current/BP-1769345365-172.17.0.2-1731308417235/current, will proceed with Du for space computation calculation, 2024-11-11T07:00:17,915 WARN [Thread-462 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/cluster_f8e9c5d0-28b2-d6b7-6f76-2c451ebf4b01/data/data3/current/BP-1769345365-172.17.0.2-1731308417235/current, will proceed with Du for space computation calculation, 2024-11-11T07:00:17,932 WARN [Thread-451 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T07:00:17,935 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x22d8dff90229c2ce with lease ID 0xbebf83a5cff65dfe: Processing first storage report for DS-6a210805-050e-4d7e-8a86-9857e94397a6 from datanode DatanodeRegistration(127.0.0.1:38405, datanodeUuid=3ecb14db-c810-416b-aefa-fdec9d461727, infoPort=42119, infoSecurePort=0, ipcPort=39951, storageInfo=lv=-57;cid=testClusterID;nsid=597623059;c=1731308417235) 2024-11-11T07:00:17,935 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x22d8dff90229c2ce with lease ID 0xbebf83a5cff65dfe: from storage DS-6a210805-050e-4d7e-8a86-9857e94397a6 node DatanodeRegistration(127.0.0.1:38405, datanodeUuid=3ecb14db-c810-416b-aefa-fdec9d461727, infoPort=42119, infoSecurePort=0, ipcPort=39951, storageInfo=lv=-57;cid=testClusterID;nsid=597623059;c=1731308417235), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:00:17,935 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x22d8dff90229c2ce with lease ID 0xbebf83a5cff65dfe: Processing first storage report for DS-aa880b62-9132-47db-b92e-4a0ad1df5574 from datanode DatanodeRegistration(127.0.0.1:38405, datanodeUuid=3ecb14db-c810-416b-aefa-fdec9d461727, infoPort=42119, infoSecurePort=0, ipcPort=39951, storageInfo=lv=-57;cid=testClusterID;nsid=597623059;c=1731308417235) 2024-11-11T07:00:17,935 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x22d8dff90229c2ce with lease ID 0xbebf83a5cff65dfe: from storage DS-aa880b62-9132-47db-b92e-4a0ad1df5574 node DatanodeRegistration(127.0.0.1:38405, datanodeUuid=3ecb14db-c810-416b-aefa-fdec9d461727, infoPort=42119, infoSecurePort=0, ipcPort=39951, storageInfo=lv=-57;cid=testClusterID;nsid=597623059;c=1731308417235), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:00:17,942 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c 2024-11-11T07:00:17,944 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/cluster_f8e9c5d0-28b2-d6b7-6f76-2c451ebf4b01/zookeeper_0, clientPort=64854, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/cluster_f8e9c5d0-28b2-d6b7-6f76-2c451ebf4b01/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/cluster_f8e9c5d0-28b2-d6b7-6f76-2c451ebf4b01/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T07:00:17,945 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64854 2024-11-11T07:00:17,946 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:00:17,947 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:00:17,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38405 is added to blk_1073741825_1001 (size=7) 2024-11-11T07:00:17,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43347 is added to blk_1073741825_1001 (size=7) 2024-11-11T07:00:17,961 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9 with version=8 2024-11-11T07:00:17,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/hbase-staging 2024-11-11T07:00:17,963 INFO [Time-limited test {}] client.ConnectionUtils(128): master/f334e8b0fbdf:0 server-side Connection retries=45 2024-11-11T07:00:17,963 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:00:17,964 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T07:00:17,964 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T07:00:17,964 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:00:17,964 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T07:00:17,964 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T07:00:17,964 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T07:00:17,965 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44857 2024-11-11T07:00:17,966 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44857 connecting to ZooKeeper ensemble=127.0.0.1:64854 2024-11-11T07:00:17,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:448570x0, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T07:00:17,974 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44857-0x10105d3de270000 connected 2024-11-11T07:00:17,989 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:00:17,991 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:00:17,993 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:00:17,993 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9, hbase.cluster.distributed=false 2024-11-11T07:00:17,995 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T07:00:17,995 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44857 2024-11-11T07:00:17,995 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44857 2024-11-11T07:00:17,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44857 2024-11-11T07:00:17,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44857 2024-11-11T07:00:17,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44857 2024-11-11T07:00:18,013 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f334e8b0fbdf:0 server-side Connection retries=45 2024-11-11T07:00:18,013 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:00:18,013 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T07:00:18,013 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T07:00:18,013 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:00:18,014 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T07:00:18,014 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T07:00:18,014 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T07:00:18,015 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34611 2024-11-11T07:00:18,016 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34611 connecting to ZooKeeper ensemble=127.0.0.1:64854 2024-11-11T07:00:18,017 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:00:18,019 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:00:18,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:346110x0, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T07:00:18,025 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:346110x0, quorum=127.0.0.1:64854, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:00:18,026 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34611-0x10105d3de270001 connected 2024-11-11T07:00:18,026 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T07:00:18,027 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T07:00:18,027 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34611-0x10105d3de270001, quorum=127.0.0.1:64854, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T07:00:18,029 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34611-0x10105d3de270001, quorum=127.0.0.1:64854, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T07:00:18,032 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34611 2024-11-11T07:00:18,033 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34611 2024-11-11T07:00:18,033 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34611 2024-11-11T07:00:18,033 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34611 2024-11-11T07:00:18,036 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34611 2024-11-11T07:00:18,049 
DEBUG [M:0;f334e8b0fbdf:44857 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;f334e8b0fbdf:44857 2024-11-11T07:00:18,049 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/f334e8b0fbdf,44857,1731308417963 2024-11-11T07:00:18,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T07:00:18,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34611-0x10105d3de270001, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T07:00:18,051 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/f334e8b0fbdf,44857,1731308417963 2024-11-11T07:00:18,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34611-0x10105d3de270001, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T07:00:18,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:18,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34611-0x10105d3de270001, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:18,054 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T07:00:18,054 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/f334e8b0fbdf,44857,1731308417963 from backup master directory 2024-11-11T07:00:18,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/f334e8b0fbdf,44857,1731308417963 2024-11-11T07:00:18,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34611-0x10105d3de270001, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T07:00:18,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T07:00:18,056 WARN [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
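The ZooKeeper events above record the master registering an ephemeral znode under /hbase/backup-masters, watching /hbase/master, and then deleting its backup registration once it wins the active-master race. HBase does this through its own ZKWatcher/ZKUtil/ActiveMasterManager wrappers; the sketch below only illustrates the underlying ephemeral-znode-plus-watcher pattern with the plain Apache ZooKeeper client API, reusing the paths and server name from the log for readability.

```java
// Generic sketch of the ephemeral-znode + watcher pattern visible in the log above.
// HBase's ActiveMasterManager implements this via ZKWatcher/ZKUtil; only the znode
// paths and server name are taken from the log, the rest is illustrative.
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class ActiveMasterSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:64854", 30000,
        (WatchedEvent event) -> System.out.println("ZK event: " + event));

    byte[] serverName = "f334e8b0fbdf,44857,1731308417963".getBytes("UTF-8");

    // Register under /hbase/backup-masters first (ephemeral: removed if the process dies) ...
    zk.create("/hbase/backup-masters/f334e8b0fbdf,44857,1731308417963", serverName,
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    // ... then try to claim the active-master znode; whoever creates it first wins.
    zk.create("/hbase/master", serverName,
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    // On success, drop the backup-master registration (the NodeDeleted event in the log).
    zk.delete("/hbase/backup-masters/f334e8b0fbdf,44857,1731308417963", -1);

    zk.close();
  }
}
```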
2024-11-11T07:00:18,056 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=f334e8b0fbdf,44857,1731308417963 2024-11-11T07:00:18,062 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/hbase.id] with ID: be0f7104-a609-4f7e-93de-d5eaa5dfd901 2024-11-11T07:00:18,062 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/.tmp/hbase.id 2024-11-11T07:00:18,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43347 is added to blk_1073741826_1002 (size=42) 2024-11-11T07:00:18,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38405 is added to blk_1073741826_1002 (size=42) 2024-11-11T07:00:18,071 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/.tmp/hbase.id]:[hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/hbase.id] 2024-11-11T07:00:18,085 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:00:18,085 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T07:00:18,087 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
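The cluster ID entries above follow the usual write-to-temp-then-rename pattern: hbase.id is first written under .tmp and then moved into place so readers never observe a partially written file. A minimal sketch of that pattern with the plain Hadoop FileSystem API follows; HBase's FSUtils does this internally (and stores the ID in its own serialized form), so paths and content here are taken from the log purely for illustration.

```java
// Minimal sketch of the write-to-temp-then-rename pattern the log describes for
// the cluster ID file. HBase's FSUtils implements this internally; the paths and
// the UUID echo the log entries above, everything else is illustrative.
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:45449");
    FileSystem fs = FileSystem.get(conf);

    Path rootDir = new Path("/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9");
    Path tmpId = new Path(rootDir, ".tmp/hbase.id");
    Path finalId = new Path(rootDir, "hbase.id");

    // 1. Write the ID to a temporary file first ...
    try (FSDataOutputStream out = fs.create(tmpId, true)) {
      out.write("be0f7104-a609-4f7e-93de-d5eaa5dfd901".getBytes(StandardCharsets.UTF_8));
    }
    // 2. ... then publish it atomically with a rename.
    if (!fs.rename(tmpId, finalId)) {
      throw new java.io.IOException("rename failed: " + tmpId + " -> " + finalId);
    }
  }
}
```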
2024-11-11T07:00:18,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34611-0x10105d3de270001, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:18,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:18,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38405 is added to blk_1073741827_1003 (size=196) 2024-11-11T07:00:18,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43347 is added to blk_1073741827_1003 (size=196) 2024-11-11T07:00:18,107 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T07:00:18,108 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T07:00:18,109 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T07:00:18,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43347 is added to blk_1073741828_1004 (size=1189) 2024-11-11T07:00:18,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38405 is added to blk_1073741828_1004 (size=1189) 2024-11-11T07:00:18,117 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store 2024-11-11T07:00:18,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43347 is added to blk_1073741829_1005 (size=34) 2024-11-11T07:00:18,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38405 is added to blk_1073741829_1005 (size=34) 2024-11-11T07:00:18,125 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:00:18,125 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T07:00:18,125 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:00:18,125 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:00:18,125 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T07:00:18,125 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:00:18,125 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
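The descriptor dumped above defines the master's local "master:store" region with four column families (info, proc, rs, state) and their attributes (versions, bloom filter type, block size, in-memory flag, ROW_INDEX_V1 encoding for info). The master builds this descriptor internally, but for orientation the sketch below shows how an equivalent descriptor would be expressed with the public HBase client builder API; it is illustrative only and mirrors just the attributes visible in the log.

```java
// Illustrative only: the master constructs its local "master:store" descriptor itself.
// This shows the equivalent public builder-API form for the attributes in the log:
// 'info' with VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom, 8 KB blocks, in-memory;
// 'proc'/'rs'/'state' with the defaults (VERSIONS=1, ROW bloom, 64 KB blocks).
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)
            .setInMemory(true)
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();
  }
}
```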
2024-11-11T07:00:18,126 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731308418125Disabling compacts and flushes for region at 1731308418125Disabling writes for close at 1731308418125Writing region close event to WAL at 1731308418125Closed at 1731308418125 2024-11-11T07:00:18,127 WARN [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store/.initializing 2024-11-11T07:00:18,127 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/WALs/f334e8b0fbdf,44857,1731308417963 2024-11-11T07:00:18,131 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f334e8b0fbdf%2C44857%2C1731308417963, suffix=, logDir=hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/WALs/f334e8b0fbdf,44857,1731308417963, archiveDir=hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/oldWALs, maxLogs=10 2024-11-11T07:00:18,131 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C44857%2C1731308417963.1731308418131 2024-11-11T07:00:18,137 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/WALs/f334e8b0fbdf,44857,1731308417963/f334e8b0fbdf%2C44857%2C1731308417963.1731308418131 2024-11-11T07:00:18,138 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33709:33709),(127.0.0.1/127.0.0.1:42119:42119)] 2024-11-11T07:00:18,139 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T07:00:18,139 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:00:18,139 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:00:18,139 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:00:18,141 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:00:18,142 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T07:00:18,142 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:18,143 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:00:18,143 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:00:18,144 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T07:00:18,144 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:18,145 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T07:00:18,145 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:00:18,147 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T07:00:18,147 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:18,148 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T07:00:18,148 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:00:18,149 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T07:00:18,149 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:18,150 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T07:00:18,150 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:00:18,151 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:00:18,151 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:00:18,153 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:00:18,153 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:00:18,153 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T07:00:18,154 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:00:18,157 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T07:00:18,157 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=780701, jitterRate=-0.007287576794624329}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T07:00:18,159 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731308418139Initializing all the Stores at 1731308418140 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308418140Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308418140Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308418140Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308418140Cleaning up temporary data from old regions at 1731308418153 (+13 ms)Region opened successfully at 1731308418159 (+6 ms) 2024-11-11T07:00:18,159 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T07:00:18,163 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c9a3303, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f334e8b0fbdf/172.17.0.2:0 2024-11-11T07:00:18,164 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T07:00:18,164 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T07:00:18,164 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T07:00:18,165 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T07:00:18,165 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T07:00:18,165 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T07:00:18,166 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T07:00:18,168 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T07:00:18,169 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T07:00:18,170 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T07:00:18,171 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T07:00:18,171 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T07:00:18,173 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T07:00:18,173 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T07:00:18,174 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T07:00:18,175 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T07:00:18,176 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T07:00:18,179 DEBUG 
[master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T07:00:18,181 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T07:00:18,182 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T07:00:18,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T07:00:18,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34611-0x10105d3de270001, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T07:00:18,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34611-0x10105d3de270001, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:18,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:18,184 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=f334e8b0fbdf,44857,1731308417963, sessionid=0x10105d3de270000, setting cluster-up flag (Was=false) 2024-11-11T07:00:18,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34611-0x10105d3de270001, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:18,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:18,193 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T07:00:18,194 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f334e8b0fbdf,44857,1731308417963 2024-11-11T07:00:18,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:18,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34611-0x10105d3de270001, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:18,205 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T07:00:18,206 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f334e8b0fbdf,44857,1731308417963 2024-11-11T07:00:18,207 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T07:00:18,209 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T07:00:18,209 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T07:00:18,210 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T07:00:18,210 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: f334e8b0fbdf,44857,1731308417963 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T07:00:18,211 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T07:00:18,211 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T07:00:18,211 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T07:00:18,211 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T07:00:18,212 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/f334e8b0fbdf:0, corePoolSize=10, maxPoolSize=10 2024-11-11T07:00:18,212 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:18,212 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=2, maxPoolSize=2 2024-11-11T07:00:18,212 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T07:00:18,213 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731308448213 2024-11-11T07:00:18,213 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T07:00:18,213 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T07:00:18,214 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T07:00:18,214 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T07:00:18,214 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T07:00:18,214 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T07:00:18,214 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T07:00:18,214 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:18,214 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T07:00:18,214 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T07:00:18,214 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T07:00:18,214 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T07:00:18,215 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T07:00:18,215 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T07:00:18,215 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.large.0-1731308418215,5,FailOnTimeoutGroup] 2024-11-11T07:00:18,215 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:18,215 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.small.0-1731308418215,5,FailOnTimeoutGroup] 2024-11-11T07:00:18,215 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-11T07:00:18,215 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T07:00:18,215 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:18,215 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:18,215 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T07:00:18,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38405 is added to blk_1073741831_1007 (size=1321) 2024-11-11T07:00:18,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43347 is added to blk_1073741831_1007 (size=1321) 2024-11-11T07:00:18,224 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T07:00:18,224 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9 2024-11-11T07:00:18,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38405 is added to blk_1073741832_1008 (size=32) 2024-11-11T07:00:18,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43347 is added to blk_1073741832_1008 (size=32) 2024-11-11T07:00:18,234 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:00:18,236 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T07:00:18,238 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T07:00:18,238 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:18,238 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:00:18,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 
1588230740 2024-11-11T07:00:18,240 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T07:00:18,240 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:18,240 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:00:18,241 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T07:00:18,242 INFO [RS:0;f334e8b0fbdf:34611 {}] regionserver.HRegionServer(746): ClusterId : be0f7104-a609-4f7e-93de-d5eaa5dfd901 2024-11-11T07:00:18,242 DEBUG [RS:0;f334e8b0fbdf:34611 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T07:00:18,242 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T07:00:18,242 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:18,243 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:00:18,243 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T07:00:18,245 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T07:00:18,245 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:18,245 DEBUG [RS:0;f334e8b0fbdf:34611 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T07:00:18,245 DEBUG [RS:0;f334e8b0fbdf:34611 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T07:00:18,245 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:00:18,246 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T07:00:18,246 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/data/hbase/meta/1588230740 2024-11-11T07:00:18,247 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/data/hbase/meta/1588230740 2024-11-11T07:00:18,247 DEBUG [RS:0;f334e8b0fbdf:34611 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T07:00:18,248 DEBUG [RS:0;f334e8b0fbdf:34611 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f6988b5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f334e8b0fbdf/172.17.0.2:0 2024-11-11T07:00:18,248 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T07:00:18,249 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T07:00:18,249 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-11T07:00:18,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T07:00:18,253 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T07:00:18,253 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=773352, jitterRate=-0.016632959246635437}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T07:00:18,255 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731308418234Initializing all the Stores at 1731308418236 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308418236Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308418236Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308418236Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308418236Cleaning up temporary data from old regions at 1731308418249 (+13 ms)Region opened successfully at 1731308418255 (+6 ms) 2024-11-11T07:00:18,255 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T07:00:18,255 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T07:00:18,255 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T07:00:18,255 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T07:00:18,255 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T07:00:18,256 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T07:00:18,256 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731308418255Disabling compacts and flushes for region at 1731308418255Disabling writes for close at 1731308418255Writing region 
close event to WAL at 1731308418255Closed at 1731308418255 2024-11-11T07:00:18,257 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T07:00:18,257 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T07:00:18,257 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T07:00:18,259 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T07:00:18,261 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T07:00:18,268 DEBUG [RS:0;f334e8b0fbdf:34611 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;f334e8b0fbdf:34611 2024-11-11T07:00:18,268 INFO [RS:0;f334e8b0fbdf:34611 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T07:00:18,268 INFO [RS:0;f334e8b0fbdf:34611 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T07:00:18,268 DEBUG [RS:0;f334e8b0fbdf:34611 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-11T07:00:18,269 INFO [RS:0;f334e8b0fbdf:34611 {}] regionserver.HRegionServer(2659): reportForDuty to master=f334e8b0fbdf,44857,1731308417963 with port=34611, startcode=1731308418013 2024-11-11T07:00:18,269 DEBUG [RS:0;f334e8b0fbdf:34611 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T07:00:18,272 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51255, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T07:00:18,273 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44857 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f334e8b0fbdf,34611,1731308418013 2024-11-11T07:00:18,273 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44857 {}] master.ServerManager(517): Registering regionserver=f334e8b0fbdf,34611,1731308418013 2024-11-11T07:00:18,275 DEBUG [RS:0;f334e8b0fbdf:34611 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9 2024-11-11T07:00:18,275 DEBUG [RS:0;f334e8b0fbdf:34611 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45449 2024-11-11T07:00:18,276 DEBUG [RS:0;f334e8b0fbdf:34611 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T07:00:18,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T07:00:18,278 DEBUG [RS:0;f334e8b0fbdf:34611 {}] 
zookeeper.ZKUtil(111): regionserver:34611-0x10105d3de270001, quorum=127.0.0.1:64854, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f334e8b0fbdf,34611,1731308418013 2024-11-11T07:00:18,279 WARN [RS:0;f334e8b0fbdf:34611 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T07:00:18,279 INFO [RS:0;f334e8b0fbdf:34611 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T07:00:18,279 DEBUG [RS:0;f334e8b0fbdf:34611 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/WALs/f334e8b0fbdf,34611,1731308418013 2024-11-11T07:00:18,279 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f334e8b0fbdf,34611,1731308418013] 2024-11-11T07:00:18,283 INFO [RS:0;f334e8b0fbdf:34611 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T07:00:18,287 INFO [RS:0;f334e8b0fbdf:34611 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T07:00:18,288 INFO [RS:0;f334e8b0fbdf:34611 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T07:00:18,288 INFO [RS:0;f334e8b0fbdf:34611 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:18,292 INFO [RS:0;f334e8b0fbdf:34611 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T07:00:18,293 INFO [RS:0;f334e8b0fbdf:34611 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T07:00:18,293 INFO [RS:0;f334e8b0fbdf:34611 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-11T07:00:18,294 DEBUG [RS:0;f334e8b0fbdf:34611 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:18,294 DEBUG [RS:0;f334e8b0fbdf:34611 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:18,294 DEBUG [RS:0;f334e8b0fbdf:34611 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:18,294 DEBUG [RS:0;f334e8b0fbdf:34611 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:18,294 DEBUG [RS:0;f334e8b0fbdf:34611 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:18,294 DEBUG [RS:0;f334e8b0fbdf:34611 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f334e8b0fbdf:0, corePoolSize=2, maxPoolSize=2 2024-11-11T07:00:18,294 DEBUG [RS:0;f334e8b0fbdf:34611 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:18,294 DEBUG [RS:0;f334e8b0fbdf:34611 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:18,294 DEBUG [RS:0;f334e8b0fbdf:34611 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:18,294 DEBUG [RS:0;f334e8b0fbdf:34611 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:18,294 DEBUG [RS:0;f334e8b0fbdf:34611 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:18,294 DEBUG [RS:0;f334e8b0fbdf:34611 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:18,294 DEBUG [RS:0;f334e8b0fbdf:34611 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f334e8b0fbdf:0, corePoolSize=3, maxPoolSize=3 2024-11-11T07:00:18,294 DEBUG [RS:0;f334e8b0fbdf:34611 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0, corePoolSize=3, maxPoolSize=3 2024-11-11T07:00:18,295 INFO [RS:0;f334e8b0fbdf:34611 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:18,295 INFO [RS:0;f334e8b0fbdf:34611 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:18,295 INFO [RS:0;f334e8b0fbdf:34611 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:18,295 INFO [RS:0;f334e8b0fbdf:34611 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-11T07:00:18,295 INFO [RS:0;f334e8b0fbdf:34611 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:18,295 INFO [RS:0;f334e8b0fbdf:34611 {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,34611,1731308418013-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T07:00:18,311 INFO [RS:0;f334e8b0fbdf:34611 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T07:00:18,311 INFO [RS:0;f334e8b0fbdf:34611 {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,34611,1731308418013-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:18,311 INFO [RS:0;f334e8b0fbdf:34611 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:18,311 INFO [RS:0;f334e8b0fbdf:34611 {}] regionserver.Replication(171): f334e8b0fbdf,34611,1731308418013 started 2024-11-11T07:00:18,327 INFO [RS:0;f334e8b0fbdf:34611 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:18,327 INFO [RS:0;f334e8b0fbdf:34611 {}] regionserver.HRegionServer(1482): Serving as f334e8b0fbdf,34611,1731308418013, RpcServer on f334e8b0fbdf/172.17.0.2:34611, sessionid=0x10105d3de270001 2024-11-11T07:00:18,327 DEBUG [RS:0;f334e8b0fbdf:34611 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T07:00:18,327 DEBUG [RS:0;f334e8b0fbdf:34611 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f334e8b0fbdf,34611,1731308418013 2024-11-11T07:00:18,327 DEBUG [RS:0;f334e8b0fbdf:34611 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f334e8b0fbdf,34611,1731308418013' 2024-11-11T07:00:18,327 DEBUG [RS:0;f334e8b0fbdf:34611 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T07:00:18,328 DEBUG [RS:0;f334e8b0fbdf:34611 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T07:00:18,329 DEBUG [RS:0;f334e8b0fbdf:34611 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T07:00:18,329 DEBUG [RS:0;f334e8b0fbdf:34611 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T07:00:18,329 DEBUG [RS:0;f334e8b0fbdf:34611 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager f334e8b0fbdf,34611,1731308418013 2024-11-11T07:00:18,329 DEBUG [RS:0;f334e8b0fbdf:34611 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f334e8b0fbdf,34611,1731308418013' 2024-11-11T07:00:18,329 DEBUG [RS:0;f334e8b0fbdf:34611 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T07:00:18,329 DEBUG [RS:0;f334e8b0fbdf:34611 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T07:00:18,330 DEBUG [RS:0;f334e8b0fbdf:34611 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T07:00:18,330 INFO [RS:0;f334e8b0fbdf:34611 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T07:00:18,330 INFO [RS:0;f334e8b0fbdf:34611 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-11T07:00:18,411 WARN [f334e8b0fbdf:44857 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-11T07:00:18,432 INFO [RS:0;f334e8b0fbdf:34611 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f334e8b0fbdf%2C34611%2C1731308418013, suffix=, logDir=hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/WALs/f334e8b0fbdf,34611,1731308418013, archiveDir=hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/oldWALs, maxLogs=32 2024-11-11T07:00:18,434 INFO [RS:0;f334e8b0fbdf:34611 {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C34611%2C1731308418013.1731308418434 2024-11-11T07:00:18,441 INFO [RS:0;f334e8b0fbdf:34611 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/WALs/f334e8b0fbdf,34611,1731308418013/f334e8b0fbdf%2C34611%2C1731308418013.1731308418434 2024-11-11T07:00:18,441 DEBUG [RS:0;f334e8b0fbdf:34611 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42119:42119),(127.0.0.1/127.0.0.1:33709:33709)] 2024-11-11T07:00:18,662 DEBUG [f334e8b0fbdf:44857 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T07:00:18,662 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=f334e8b0fbdf,34611,1731308418013 2024-11-11T07:00:18,664 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f334e8b0fbdf,34611,1731308418013, state=OPENING 2024-11-11T07:00:18,667 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T07:00:18,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34611-0x10105d3de270001, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:18,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:18,669 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T07:00:18,669 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T07:00:18,669 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T07:00:18,669 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=f334e8b0fbdf,34611,1731308418013}] 2024-11-11T07:00:18,823 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T07:00:18,825 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35933, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T07:00:18,829 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T07:00:18,830 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T07:00:18,832 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f334e8b0fbdf%2C34611%2C1731308418013.meta, suffix=.meta, logDir=hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/WALs/f334e8b0fbdf,34611,1731308418013, archiveDir=hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/oldWALs, maxLogs=32 2024-11-11T07:00:18,833 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C34611%2C1731308418013.meta.1731308418833.meta 2024-11-11T07:00:18,839 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/WALs/f334e8b0fbdf,34611,1731308418013/f334e8b0fbdf%2C34611%2C1731308418013.meta.1731308418833.meta 2024-11-11T07:00:18,840 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42119:42119),(127.0.0.1/127.0.0.1:33709:33709)] 2024-11-11T07:00:18,841 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T07:00:18,841 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T07:00:18,841 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T07:00:18,841 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-11T07:00:18,841 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T07:00:18,841 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:00:18,842 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T07:00:18,842 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T07:00:18,843 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T07:00:18,844 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T07:00:18,844 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:18,845 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:00:18,845 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T07:00:18,846 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T07:00:18,846 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:18,846 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:00:18,847 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T07:00:18,847 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T07:00:18,847 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:18,848 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:00:18,848 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T07:00:18,849 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T07:00:18,849 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:18,850 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-11T07:00:18,850 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T07:00:18,851 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/data/hbase/meta/1588230740 2024-11-11T07:00:18,852 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/data/hbase/meta/1588230740 2024-11-11T07:00:18,854 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T07:00:18,854 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T07:00:18,855 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T07:00:18,857 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T07:00:18,858 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=781019, jitterRate=-0.006883725523948669}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T07:00:18,858 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T07:00:18,859 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731308418842Writing region info on filesystem at 1731308418842Initializing all the Stores at 1731308418843 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308418843Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308418843Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308418843Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308418843Cleaning up temporary data from old regions at 1731308418854 (+11 ms)Running coprocessor post-open hooks at 1731308418858 (+4 ms)Region opened successfully at 1731308418859 (+1 ms) 2024-11-11T07:00:18,860 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731308418822 2024-11-11T07:00:18,863 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T07:00:18,863 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T07:00:18,864 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=f334e8b0fbdf,34611,1731308418013 2024-11-11T07:00:18,866 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f334e8b0fbdf,34611,1731308418013, state=OPEN 2024-11-11T07:00:18,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34611-0x10105d3de270001, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T07:00:18,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T07:00:18,870 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=f334e8b0fbdf,34611,1731308418013 2024-11-11T07:00:18,870 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T07:00:18,870 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T07:00:18,874 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T07:00:18,874 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=f334e8b0fbdf,34611,1731308418013 in 201 msec 2024-11-11T07:00:18,877 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T07:00:18,877 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 617 msec 2024-11-11T07:00:18,878 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T07:00:18,878 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T07:00:18,879 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T07:00:18,879 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f334e8b0fbdf,34611,1731308418013, seqNum=-1] 2024-11-11T07:00:18,880 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T07:00:18,881 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42931, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T07:00:18,888 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 678 msec 2024-11-11T07:00:18,888 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731308418888, completionTime=-1 2024-11-11T07:00:18,888 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T07:00:18,888 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-11T07:00:18,890 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-11T07:00:18,890 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731308478890 2024-11-11T07:00:18,890 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731308538890 2024-11-11T07:00:18,890 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-11T07:00:18,890 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,44857,1731308417963-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:18,890 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,44857,1731308417963-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:18,890 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,44857,1731308417963-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:18,890 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-f334e8b0fbdf:44857, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T07:00:18,890 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:18,891 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:18,893 DEBUG [master/f334e8b0fbdf:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T07:00:18,895 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.839sec 2024-11-11T07:00:18,895 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T07:00:18,895 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T07:00:18,895 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T07:00:18,895 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T07:00:18,895 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T07:00:18,895 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,44857,1731308417963-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T07:00:18,895 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,44857,1731308417963-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T07:00:18,898 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T07:00:18,898 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T07:00:18,898 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,44857,1731308417963-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T07:00:18,942 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ffb657f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T07:00:18,942 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request f334e8b0fbdf,44857,-1 for getting cluster id 2024-11-11T07:00:18,942 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T07:00:18,944 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be0f7104-a609-4f7e-93de-d5eaa5dfd901' 2024-11-11T07:00:18,945 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T07:00:18,945 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be0f7104-a609-4f7e-93de-d5eaa5dfd901" 2024-11-11T07:00:18,945 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@564619b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T07:00:18,945 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f334e8b0fbdf,44857,-1] 2024-11-11T07:00:18,946 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T07:00:18,946 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:00:18,948 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55120, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T07:00:18,949 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e5d9cb3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T07:00:18,949 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T07:00:18,951 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f334e8b0fbdf,34611,1731308418013, seqNum=-1] 2024-11-11T07:00:18,951 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T07:00:18,953 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56068, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T07:00:18,955 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=f334e8b0fbdf,44857,1731308417963 2024-11-11T07:00:18,956 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:00:18,959 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T07:00:18,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T07:00:18,960 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T07:00:18,960 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T07:00:18,960 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:00:18,960 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:00:18,960 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-11T07:00:18,960 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T07:00:18,960 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=398545913, stopped=false 2024-11-11T07:00:18,960 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=f334e8b0fbdf,44857,1731308417963 2024-11-11T07:00:18,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T07:00:18,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34611-0x10105d3de270001, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T07:00:18,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:18,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34611-0x10105d3de270001, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:18,964 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T07:00:18,964 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
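The call stack above records the path this test takes to shut everything down: AbstractTestLogRolling.tearDown calls HBaseTestingUtil.shutdownMiniCluster, which in turn calls shutdownMiniHBaseCluster and cleanup. A minimal sketch of that JUnit 4 lifecycle follows; the class and method names in the sketch are hypothetical, and it assumes HBaseTestingUtil's no-arg constructor and startMiniCluster() behave as in the older HBaseTestingUtility API (only shutdownMiniCluster() is directly confirmed by the stack trace):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class MiniClusterLifecycleSketch {
      // Illustrative test class, not the actual AbstractTestLogRolling code.
      private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Brings up ZooKeeper, HDFS and HBase in-process, producing a
        // "Minicluster is up" line like the one in the log above.
        testUtil.startMiniCluster();
      }

      @After
      public void tearDown() throws Exception {
        // Mirrors the call chain in the stack trace: close connections, stop the
        // region server and master, then tear down DFS and ZooKeeper.
        testUtil.shutdownMiniCluster();
      }

      @Test
      public void smokeTest() {
        // Intentionally empty; only the cluster lifecycle is illustrated.
      }
    }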
2024-11-11T07:00:18,965 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T07:00:18,965 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:00:18,965 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34611-0x10105d3de270001, quorum=127.0.0.1:64854, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:00:18,965 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:00:18,965 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'f334e8b0fbdf,34611,1731308418013' ***** 2024-11-11T07:00:18,965 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T07:00:18,965 INFO [RS:0;f334e8b0fbdf:34611 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T07:00:18,966 INFO [RS:0;f334e8b0fbdf:34611 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T07:00:18,966 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T07:00:18,966 INFO [RS:0;f334e8b0fbdf:34611 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T07:00:18,966 INFO [RS:0;f334e8b0fbdf:34611 {}] regionserver.HRegionServer(959): stopping server f334e8b0fbdf,34611,1731308418013 2024-11-11T07:00:18,966 INFO [RS:0;f334e8b0fbdf:34611 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T07:00:18,966 INFO [RS:0;f334e8b0fbdf:34611 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;f334e8b0fbdf:34611. 2024-11-11T07:00:18,966 DEBUG [RS:0;f334e8b0fbdf:34611 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T07:00:18,966 DEBUG [RS:0;f334e8b0fbdf:34611 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:00:18,966 INFO [RS:0;f334e8b0fbdf:34611 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-11T07:00:18,966 INFO [RS:0;f334e8b0fbdf:34611 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T07:00:18,966 INFO [RS:0;f334e8b0fbdf:34611 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T07:00:18,966 INFO [RS:0;f334e8b0fbdf:34611 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T07:00:18,967 INFO [RS:0;f334e8b0fbdf:34611 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-11T07:00:18,967 DEBUG [RS:0;f334e8b0fbdf:34611 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-11T07:00:18,967 DEBUG [RS:0;f334e8b0fbdf:34611 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T07:00:18,967 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T07:00:18,967 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T07:00:18,967 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T07:00:18,967 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T07:00:18,967 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T07:00:18,967 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-11T07:00:18,986 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/data/hbase/meta/1588230740/.tmp/ns/6b5f86dc95d945379474666d1fe0f93a is 43, key is default/ns:d/1731308418882/Put/seqid=0 2024-11-11T07:00:18,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38405 is added to blk_1073741835_1011 (size=5153) 2024-11-11T07:00:18,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43347 is added to blk_1073741835_1011 (size=5153) 2024-11-11T07:00:18,993 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/data/hbase/meta/1588230740/.tmp/ns/6b5f86dc95d945379474666d1fe0f93a 2024-11-11T07:00:19,002 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/data/hbase/meta/1588230740/.tmp/ns/6b5f86dc95d945379474666d1fe0f93a as hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/data/hbase/meta/1588230740/ns/6b5f86dc95d945379474666d1fe0f93a 2024-11-11T07:00:19,010 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/data/hbase/meta/1588230740/ns/6b5f86dc95d945379474666d1fe0f93a, entries=2, sequenceid=6, filesize=5.0 K 2024-11-11T07:00:19,012 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 44ms, sequenceid=6, compaction requested=false 2024-11-11T07:00:19,017 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-11T07:00:19,018 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T07:00:19,018 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T07:00:19,018 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731308418967Running coprocessor pre-close hooks at 1731308418967Disabling compacts and flushes for region at 1731308418967Disabling writes for close at 1731308418967Obtaining lock to block concurrent updates at 1731308418967Preparing flush snapshotting stores in 1588230740 at 1731308418967Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731308418968 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731308418968Flushing 1588230740/ns: creating writer at 1731308418969 (+1 ms)Flushing 1588230740/ns: appending metadata at 1731308418986 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1731308418986Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16d487ed: reopening flushed file at 1731308419001 (+15 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 44ms, sequenceid=6, compaction requested=false at 1731308419012 (+11 ms)Writing region close event to WAL at 1731308419013 (+1 ms)Running coprocessor post-close hooks at 1731308419018 (+5 ms)Closed at 1731308419018 2024-11-11T07:00:19,018 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T07:00:19,167 INFO [RS:0;f334e8b0fbdf:34611 {}] regionserver.HRegionServer(976): stopping server f334e8b0fbdf,34611,1731308418013; all regions closed. 
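The close sequence above flushes the pending hbase:meta edits before the region shuts down: the 'ns' family memstore is written to a file under .tmp and then committed into the store directory. A flush of the same kind can also be requested explicitly through the Admin API; this is a hedged sketch for comparison, not part of the test in this log (the class name and standalone connection setup are assumptions):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushMetaSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Request a flush of hbase:meta; on the server this produces the same
          // write-to-.tmp-then-commit sequence recorded in the log above.
          admin.flush(TableName.META_TABLE_NAME);
        }
      }
    }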
2024-11-11T07:00:19,167 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:19,168 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:19,168 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:19,168 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:19,168 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:19,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43347 is added to blk_1073741834_1010 (size=1152) 2024-11-11T07:00:19,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38405 is added to blk_1073741834_1010 (size=1152) 2024-11-11T07:00:19,174 DEBUG [RS:0;f334e8b0fbdf:34611 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/oldWALs 2024-11-11T07:00:19,174 INFO [RS:0;f334e8b0fbdf:34611 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f334e8b0fbdf%2C34611%2C1731308418013.meta:.meta(num 1731308418833) 2024-11-11T07:00:19,174 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:19,174 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:19,174 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:19,174 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:19,175 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:19,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43347 is added to blk_1073741833_1009 (size=93) 2024-11-11T07:00:19,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38405 is added to blk_1073741833_1009 (size=93) 2024-11-11T07:00:19,179 DEBUG [RS:0;f334e8b0fbdf:34611 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/oldWALs 2024-11-11T07:00:19,179 INFO [RS:0;f334e8b0fbdf:34611 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f334e8b0fbdf%2C34611%2C1731308418013:(num 1731308418434) 2024-11-11T07:00:19,179 DEBUG [RS:0;f334e8b0fbdf:34611 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:00:19,180 INFO [RS:0;f334e8b0fbdf:34611 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T07:00:19,180 INFO [RS:0;f334e8b0fbdf:34611 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T07:00:19,180 INFO [RS:0;f334e8b0fbdf:34611 {}] hbase.ChoreService(370): Chore service for: regionserver/f334e8b0fbdf:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T07:00:19,180 INFO [RS:0;f334e8b0fbdf:34611 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T07:00:19,180 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
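The AbstractFSWAL lines above show closed WAL files being archived into the oldWALs directory. Rolling a WAL on demand, which is what a log-rolling test generally exercises, goes through Admin.rollWALWriter; the sketch below is illustrative only (the class name, the loop over all region servers, and the standalone connection setup are assumptions, not taken from this test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          for (ServerName server : admin.getRegionServers()) {
            // Ask each region server to roll its WAL; replaced WAL files are later
            // archived under oldWALs, as in the AbstractFSWAL lines above.
            admin.rollWALWriter(server);
          }
        }
      }
    }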
2024-11-11T07:00:19,180 INFO [RS:0;f334e8b0fbdf:34611 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34611 2024-11-11T07:00:19,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34611-0x10105d3de270001, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f334e8b0fbdf,34611,1731308418013 2024-11-11T07:00:19,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T07:00:19,182 INFO [RS:0;f334e8b0fbdf:34611 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T07:00:19,184 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f334e8b0fbdf,34611,1731308418013] 2024-11-11T07:00:19,185 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f334e8b0fbdf,34611,1731308418013 already deleted, retry=false 2024-11-11T07:00:19,185 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f334e8b0fbdf,34611,1731308418013 expired; onlineServers=0 2024-11-11T07:00:19,185 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'f334e8b0fbdf,44857,1731308417963' ***** 2024-11-11T07:00:19,185 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T07:00:19,185 INFO [M:0;f334e8b0fbdf:44857 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T07:00:19,186 INFO [M:0;f334e8b0fbdf:44857 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T07:00:19,186 DEBUG [M:0;f334e8b0fbdf:44857 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T07:00:19,186 DEBUG [M:0;f334e8b0fbdf:44857 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T07:00:19,186 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-11T07:00:19,186 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.large.0-1731308418215 {}] cleaner.HFileCleaner(306): Exit Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.large.0-1731308418215,5,FailOnTimeoutGroup] 2024-11-11T07:00:19,186 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.small.0-1731308418215 {}] cleaner.HFileCleaner(306): Exit Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.small.0-1731308418215,5,FailOnTimeoutGroup] 2024-11-11T07:00:19,186 INFO [M:0;f334e8b0fbdf:44857 {}] hbase.ChoreService(370): Chore service for: master/f334e8b0fbdf:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T07:00:19,186 INFO [M:0;f334e8b0fbdf:44857 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T07:00:19,186 DEBUG [M:0;f334e8b0fbdf:44857 {}] master.HMaster(1795): Stopping service threads 2024-11-11T07:00:19,186 INFO [M:0;f334e8b0fbdf:44857 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T07:00:19,186 INFO [M:0;f334e8b0fbdf:44857 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T07:00:19,187 INFO [M:0;f334e8b0fbdf:44857 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T07:00:19,187 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T07:00:19,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T07:00:19,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:19,190 DEBUG [M:0;f334e8b0fbdf:44857 {}] zookeeper.ZKUtil(347): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T07:00:19,190 WARN [M:0;f334e8b0fbdf:44857 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T07:00:19,190 INFO [M:0;f334e8b0fbdf:44857 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/.lastflushedseqids 2024-11-11T07:00:19,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43347 is added to blk_1073741836_1012 (size=99) 2024-11-11T07:00:19,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38405 is added to blk_1073741836_1012 (size=99) 2024-11-11T07:00:19,197 INFO [M:0;f334e8b0fbdf:44857 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T07:00:19,197 INFO [M:0;f334e8b0fbdf:44857 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T07:00:19,198 DEBUG [M:0;f334e8b0fbdf:44857 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T07:00:19,198 INFO [M:0;f334e8b0fbdf:44857 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:00:19,198 DEBUG [M:0;f334e8b0fbdf:44857 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:00:19,198 DEBUG [M:0;f334e8b0fbdf:44857 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T07:00:19,198 DEBUG [M:0;f334e8b0fbdf:44857 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:00:19,198 INFO [M:0;f334e8b0fbdf:44857 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-11T07:00:19,223 DEBUG [M:0;f334e8b0fbdf:44857 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/03b1564de1734003a81671e692b7efd2 is 82, key is hbase:meta,,1/info:regioninfo/1731308418864/Put/seqid=0 2024-11-11T07:00:19,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38405 is added to blk_1073741837_1013 (size=5672) 2024-11-11T07:00:19,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43347 is added to blk_1073741837_1013 (size=5672) 2024-11-11T07:00:19,230 INFO [M:0;f334e8b0fbdf:44857 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/03b1564de1734003a81671e692b7efd2 2024-11-11T07:00:19,253 DEBUG [M:0;f334e8b0fbdf:44857 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fe55c7a8430e4f6282e5a85bb27ad640 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731308418887/Put/seqid=0 2024-11-11T07:00:19,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43347 is added to blk_1073741838_1014 (size=5275) 2024-11-11T07:00:19,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38405 is added to blk_1073741838_1014 (size=5275) 2024-11-11T07:00:19,259 INFO [M:0;f334e8b0fbdf:44857 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fe55c7a8430e4f6282e5a85bb27ad640 2024-11-11T07:00:19,281 DEBUG [M:0;f334e8b0fbdf:44857 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7c8735a4e5eb493083388cffd158796f is 69, key is f334e8b0fbdf,34611,1731308418013/rs:state/1731308418273/Put/seqid=0 2024-11-11T07:00:19,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34611-0x10105d3de270001, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=Closed, path=null 2024-11-11T07:00:19,284 INFO [RS:0;f334e8b0fbdf:34611 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T07:00:19,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34611-0x10105d3de270001, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:00:19,284 INFO [RS:0;f334e8b0fbdf:34611 {}] regionserver.HRegionServer(1031): Exiting; stopping=f334e8b0fbdf,34611,1731308418013; zookeeper connection closed. 2024-11-11T07:00:19,285 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@607c1f46 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@607c1f46 2024-11-11T07:00:19,285 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-11T07:00:19,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43347 is added to blk_1073741839_1015 (size=5156) 2024-11-11T07:00:19,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38405 is added to blk_1073741839_1015 (size=5156) 2024-11-11T07:00:19,287 INFO [M:0;f334e8b0fbdf:44857 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7c8735a4e5eb493083388cffd158796f 2024-11-11T07:00:19,309 DEBUG [M:0;f334e8b0fbdf:44857 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e05089f43f9348998538408ced132f20 is 52, key is load_balancer_on/state:d/1731308418958/Put/seqid=0 2024-11-11T07:00:19,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38405 is added to blk_1073741840_1016 (size=5056) 2024-11-11T07:00:19,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43347 is added to blk_1073741840_1016 (size=5056) 2024-11-11T07:00:19,315 INFO [M:0;f334e8b0fbdf:44857 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e05089f43f9348998538408ced132f20 2024-11-11T07:00:19,322 DEBUG [M:0;f334e8b0fbdf:44857 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/03b1564de1734003a81671e692b7efd2 as hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/03b1564de1734003a81671e692b7efd2 2024-11-11T07:00:19,329 INFO [M:0;f334e8b0fbdf:44857 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/03b1564de1734003a81671e692b7efd2, entries=8, sequenceid=29, filesize=5.5 K 2024-11-11T07:00:19,330 DEBUG [M:0;f334e8b0fbdf:44857 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fe55c7a8430e4f6282e5a85bb27ad640 as hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fe55c7a8430e4f6282e5a85bb27ad640 2024-11-11T07:00:19,337 INFO [M:0;f334e8b0fbdf:44857 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fe55c7a8430e4f6282e5a85bb27ad640, entries=3, sequenceid=29, filesize=5.2 K 2024-11-11T07:00:19,338 DEBUG [M:0;f334e8b0fbdf:44857 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7c8735a4e5eb493083388cffd158796f as hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7c8735a4e5eb493083388cffd158796f 2024-11-11T07:00:19,344 INFO [M:0;f334e8b0fbdf:44857 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7c8735a4e5eb493083388cffd158796f, entries=1, sequenceid=29, filesize=5.0 K 2024-11-11T07:00:19,346 DEBUG [M:0;f334e8b0fbdf:44857 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e05089f43f9348998538408ced132f20 as hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e05089f43f9348998538408ced132f20 2024-11-11T07:00:19,351 INFO [M:0;f334e8b0fbdf:44857 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45449/user/jenkins/test-data/42819728-fecb-e6fd-a7a5-f80280fc29b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e05089f43f9348998538408ced132f20, entries=1, sequenceid=29, filesize=4.9 K 2024-11-11T07:00:19,352 INFO [M:0;f334e8b0fbdf:44857 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 154ms, sequenceid=29, compaction requested=false 2024-11-11T07:00:19,354 INFO [M:0;f334e8b0fbdf:44857 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:00:19,354 DEBUG [M:0;f334e8b0fbdf:44857 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731308419197Disabling compacts and flushes for region at 1731308419197Disabling writes for close at 1731308419198 (+1 ms)Obtaining lock to block concurrent updates at 1731308419198Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731308419198Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731308419199 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731308419199Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731308419200 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731308419223 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731308419223Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731308419236 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731308419253 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731308419253Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731308419265 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731308419280 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731308419280Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731308419293 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731308419308 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731308419308Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@287923bf: reopening flushed file at 1731308419321 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4fbe0478: reopening flushed file at 1731308419329 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@c3873a4: reopening flushed file at 1731308419337 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6571a00a: reopening flushed file at 1731308419345 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 154ms, sequenceid=29, compaction requested=false at 1731308419352 (+7 ms)Writing region close event to WAL at 1731308419354 (+2 ms)Closed at 1731308419354 2024-11-11T07:00:19,355 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:19,355 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:19,355 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:19,355 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:19,355 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:19,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38405 is added to blk_1073741830_1006 (size=10311) 2024-11-11T07:00:19,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43347 is added to blk_1073741830_1006 (size=10311) 2024-11-11T07:00:19,358 INFO [M:0;f334e8b0fbdf:44857 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-11T07:00:19,358 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T07:00:19,358 INFO [M:0;f334e8b0fbdf:44857 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44857 2024-11-11T07:00:19,358 INFO [M:0;f334e8b0fbdf:44857 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T07:00:19,460 INFO [M:0;f334e8b0fbdf:44857 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T07:00:19,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:00:19,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44857-0x10105d3de270000, quorum=127.0.0.1:64854, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:00:19,463 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@824b6ae{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:00:19,463 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d7e5c06{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:00:19,463 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:00:19,463 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@376d199b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:00:19,464 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b44e274{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/hadoop.log.dir/,STOPPED} 2024-11-11T07:00:19,465 WARN [BP-1769345365-172.17.0.2-1731308417235 heartbeating to localhost/127.0.0.1:45449 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T07:00:19,465 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T07:00:19,465 WARN [BP-1769345365-172.17.0.2-1731308417235 heartbeating to localhost/127.0.0.1:45449 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1769345365-172.17.0.2-1731308417235 (Datanode Uuid 3ecb14db-c810-416b-aefa-fdec9d461727) service to localhost/127.0.0.1:45449 2024-11-11T07:00:19,465 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T07:00:19,466 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/cluster_f8e9c5d0-28b2-d6b7-6f76-2c451ebf4b01/data/data3/current/BP-1769345365-172.17.0.2-1731308417235 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:00:19,466 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/cluster_f8e9c5d0-28b2-d6b7-6f76-2c451ebf4b01/data/data4/current/BP-1769345365-172.17.0.2-1731308417235 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:00:19,466 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T07:00:19,468 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5d4bdc00{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:00:19,468 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@687b21ce{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:00:19,468 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:00:19,469 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@276f8783{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:00:19,469 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4edee9ab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/hadoop.log.dir/,STOPPED} 2024-11-11T07:00:19,470 WARN [BP-1769345365-172.17.0.2-1731308417235 heartbeating to localhost/127.0.0.1:45449 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T07:00:19,470 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
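The block pool shutdown messages above come from the embedded HDFS datanodes being stopped as the mini DFS cluster is torn down. The test named in the stack traces, testLogRollOnDatanodeDeath, simulates a datanode failure; a rough sketch of stopping one datanode is shown below under the assumption that HBaseTestingUtil exposes the embedded MiniDFSCluster via getDFSCluster() (as the older HBaseTestingUtility did). Everything in the sketch is illustrative, not the actual test code:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class StopDataNodeSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil testUtil = new HBaseTestingUtil();
        testUtil.startMiniCluster();
        // Assumption: getDFSCluster() returns the embedded MiniDFSCluster.
        MiniDFSCluster dfs = testUtil.getDFSCluster();
        // Stop the first datanode to simulate a datanode death while HBase is running.
        dfs.stopDataNode(0);
        testUtil.shutdownMiniCluster();
      }
    }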
2024-11-11T07:00:19,470 WARN [BP-1769345365-172.17.0.2-1731308417235 heartbeating to localhost/127.0.0.1:45449 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1769345365-172.17.0.2-1731308417235 (Datanode Uuid 2b62e074-7167-4a12-be57-b6df95df27e4) service to localhost/127.0.0.1:45449 2024-11-11T07:00:19,470 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T07:00:19,471 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/cluster_f8e9c5d0-28b2-d6b7-6f76-2c451ebf4b01/data/data1/current/BP-1769345365-172.17.0.2-1731308417235 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:00:19,471 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/cluster_f8e9c5d0-28b2-d6b7-6f76-2c451ebf4b01/data/data2/current/BP-1769345365-172.17.0.2-1731308417235 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:00:19,471 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T07:00:19,477 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@511dc70f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T07:00:19,478 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e469283{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:00:19,478 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:00:19,478 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45e3157d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:00:19,478 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3197ca45{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/hadoop.log.dir/,STOPPED} 2024-11-11T07:00:19,484 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T07:00:19,500 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T07:00:19,500 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T07:00:19,500 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/hadoop.log.dir so I do NOT create it in target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349 2024-11-11T07:00:19,500 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0aa24824-efaa-35af-71a6-dc2d9e411f6c/hadoop.tmp.dir so I do NOT create it in target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349 2024-11-11T07:00:19,500 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439, deleteOnExit=true 2024-11-11T07:00:19,500 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T07:00:19,500 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/test.cache.data in system properties and HBase conf 2024-11-11T07:00:19,500 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T07:00:19,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/hadoop.log.dir in system properties and HBase conf 2024-11-11T07:00:19,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T07:00:19,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T07:00:19,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T07:00:19,501 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-11T07:00:19,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T07:00:19,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T07:00:19,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T07:00:19,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T07:00:19,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T07:00:19,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T07:00:19,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T07:00:19,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T07:00:19,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T07:00:19,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/nfs.dump.dir in system properties and HBase conf 2024-11-11T07:00:19,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/java.io.tmpdir in system properties and HBase conf 2024-11-11T07:00:19,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T07:00:19,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T07:00:19,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T07:00:19,516 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T07:00:19,592 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:00:19,598 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T07:00:19,603 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T07:00:19,603 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T07:00:19,603 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T07:00:19,603 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:00:19,604 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a488aac{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/hadoop.log.dir/,AVAILABLE} 2024-11-11T07:00:19,604 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ed3a961{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T07:00:19,721 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7cd2a640{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/java.io.tmpdir/jetty-localhost-40297-hadoop-hdfs-3_4_1-tests_jar-_-any-8987261771506132369/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T07:00:19,721 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@64b7b556{HTTP/1.1, (http/1.1)}{localhost:40297} 2024-11-11T07:00:19,721 INFO [Time-limited test {}] server.Server(415): Started @103300ms 2024-11-11T07:00:19,735 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T07:00:19,807 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:00:19,812 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T07:00:19,813 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T07:00:19,813 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T07:00:19,813 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T07:00:19,814 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@257cf4bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/hadoop.log.dir/,AVAILABLE} 2024-11-11T07:00:19,814 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30e7c448{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T07:00:19,934 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@ab5393f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/java.io.tmpdir/jetty-localhost-44495-hadoop-hdfs-3_4_1-tests_jar-_-any-5882044077569707435/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:00:19,934 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@47f82e76{HTTP/1.1, (http/1.1)}{localhost:44495} 2024-11-11T07:00:19,934 INFO [Time-limited test {}] server.Server(415): Started @103513ms 2024-11-11T07:00:19,936 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T07:00:19,968 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:00:19,972 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T07:00:19,973 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T07:00:19,973 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T07:00:19,973 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T07:00:19,974 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69bbaec1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/hadoop.log.dir/,AVAILABLE} 2024-11-11T07:00:19,974 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7720beab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T07:00:20,039 WARN [Thread-655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data1/current/BP-1526976113-172.17.0.2-1731308419534/current, will proceed with Du for space computation calculation, 2024-11-11T07:00:20,039 WARN [Thread-656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data2/current/BP-1526976113-172.17.0.2-1731308419534/current, will proceed with Du for space computation calculation, 2024-11-11T07:00:20,060 WARN [Thread-634 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T07:00:20,063 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x27cb9f6f1935a558 with lease ID 0x8bd315100e7fffa: Processing first storage report for DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81 from datanode DatanodeRegistration(127.0.0.1:41209, datanodeUuid=500c043f-129e-4f6a-b5f3-a290998c9579, infoPort=44725, infoSecurePort=0, ipcPort=33097, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534) 2024-11-11T07:00:20,063 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x27cb9f6f1935a558 with lease ID 0x8bd315100e7fffa: from storage DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81 node DatanodeRegistration(127.0.0.1:41209, datanodeUuid=500c043f-129e-4f6a-b5f3-a290998c9579, infoPort=44725, infoSecurePort=0, ipcPort=33097, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:00:20,063 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x27cb9f6f1935a558 with lease ID 0x8bd315100e7fffa: Processing first storage report for DS-580f8dce-0426-44bc-b9b7-9ed30aebc565 from datanode DatanodeRegistration(127.0.0.1:41209, datanodeUuid=500c043f-129e-4f6a-b5f3-a290998c9579, infoPort=44725, infoSecurePort=0, ipcPort=33097, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534) 2024-11-11T07:00:20,063 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x27cb9f6f1935a558 with lease ID 0x8bd315100e7fffa: from storage DS-580f8dce-0426-44bc-b9b7-9ed30aebc565 node DatanodeRegistration(127.0.0.1:41209, datanodeUuid=500c043f-129e-4f6a-b5f3-a290998c9579, infoPort=44725, infoSecurePort=0, ipcPort=33097, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:00:20,096 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@21d5e4af{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/java.io.tmpdir/jetty-localhost-37349-hadoop-hdfs-3_4_1-tests_jar-_-any-6163734832056206563/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:00:20,096 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@145c0180{HTTP/1.1, (http/1.1)}{localhost:37349} 2024-11-11T07:00:20,096 INFO [Time-limited test {}] server.Server(415): Started @103675ms 2024-11-11T07:00:20,098 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
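The entries above (together with the earlier "Starting up minicluster with option: StartMiniClusterOption{...}" entry in this run) record HBaseTestingUtil bringing up a fresh minicluster: DataNodes register their storage with the NameNode, Jetty web apps start, and a MiniZooKeeperCluster follows. As a rough orientation only, the sketch below shows how a test typically drives that lifecycle; HBaseTestingUtil, StartMiniClusterOption and the option values are taken from the log itself, while the exact builder calls and the test body are assumptions, not code from this run.

    // Illustrative sketch only -- not taken from this log or from the test that produced it.
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterLifecycleSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Option values mirror the StartMiniClusterOption printed in the log above.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);    // starts DFS, MiniZooKeeperCluster, master and region server
        try {
          // ... test body would run against util.getConnection() here ...
        } finally {
          util.shutdownMiniCluster();     // emits the "Minicluster is down" entry seen earlier
        }
      }
    }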
2024-11-11T07:00:20,209 WARN [Thread-681 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data3/current/BP-1526976113-172.17.0.2-1731308419534/current, will proceed with Du for space computation calculation, 2024-11-11T07:00:20,210 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data4/current/BP-1526976113-172.17.0.2-1731308419534/current, will proceed with Du for space computation calculation, 2024-11-11T07:00:20,232 WARN [Thread-670 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T07:00:20,235 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbfa902a962796192 with lease ID 0x8bd315100e7fffb: Processing first storage report for DS-838013a3-793d-45af-bf61-73d95dc42912 from datanode DatanodeRegistration(127.0.0.1:39455, datanodeUuid=85195dfd-4a83-4b20-9c58-2eee99c5bac8, infoPort=43437, infoSecurePort=0, ipcPort=40801, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534) 2024-11-11T07:00:20,235 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbfa902a962796192 with lease ID 0x8bd315100e7fffb: from storage DS-838013a3-793d-45af-bf61-73d95dc42912 node DatanodeRegistration(127.0.0.1:39455, datanodeUuid=85195dfd-4a83-4b20-9c58-2eee99c5bac8, infoPort=43437, infoSecurePort=0, ipcPort=40801, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:00:20,235 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbfa902a962796192 with lease ID 0x8bd315100e7fffb: Processing first storage report for DS-d28ad5a6-0a53-4114-ac14-f452714625d5 from datanode DatanodeRegistration(127.0.0.1:39455, datanodeUuid=85195dfd-4a83-4b20-9c58-2eee99c5bac8, infoPort=43437, infoSecurePort=0, ipcPort=40801, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534) 2024-11-11T07:00:20,235 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbfa902a962796192 with lease ID 0x8bd315100e7fffb: from storage DS-d28ad5a6-0a53-4114-ac14-f452714625d5 node DatanodeRegistration(127.0.0.1:39455, datanodeUuid=85195dfd-4a83-4b20-9c58-2eee99c5bac8, infoPort=43437, infoSecurePort=0, ipcPort=40801, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:00:20,295 INFO [regionserver/f334e8b0fbdf:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T07:00:20,327 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349 2024-11-11T07:00:20,330 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, 
dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/zookeeper_0, clientPort=50876, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T07:00:20,331 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50876 2024-11-11T07:00:20,331 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:00:20,333 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:00:20,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39455 is added to blk_1073741825_1001 (size=7) 2024-11-11T07:00:20,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41209 is added to blk_1073741825_1001 (size=7) 2024-11-11T07:00:20,345 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842 with version=8 2024-11-11T07:00:20,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/hbase-staging 2024-11-11T07:00:20,347 INFO [Time-limited test {}] client.ConnectionUtils(128): master/f334e8b0fbdf:0 server-side Connection retries=45 2024-11-11T07:00:20,347 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:00:20,347 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T07:00:20,347 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T07:00:20,347 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:00:20,347 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T07:00:20,347 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, 
hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T07:00:20,348 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T07:00:20,348 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40641 2024-11-11T07:00:20,350 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40641 connecting to ZooKeeper ensemble=127.0.0.1:50876 2024-11-11T07:00:20,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:406410x0, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T07:00:20,360 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40641-0x10105d3e7790000 connected 2024-11-11T07:00:20,378 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:00:20,380 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:00:20,382 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:00:20,382 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842, hbase.cluster.distributed=false 2024-11-11T07:00:20,384 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T07:00:20,384 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40641 2024-11-11T07:00:20,384 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40641 2024-11-11T07:00:20,385 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40641 2024-11-11T07:00:20,385 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40641 2024-11-11T07:00:20,385 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40641 2024-11-11T07:00:20,401 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f334e8b0fbdf:0 server-side Connection retries=45 2024-11-11T07:00:20,402 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:00:20,402 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T07:00:20,402 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 
readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T07:00:20,402 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:00:20,402 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T07:00:20,402 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T07:00:20,402 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T07:00:20,403 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45321 2024-11-11T07:00:20,404 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45321 connecting to ZooKeeper ensemble=127.0.0.1:50876 2024-11-11T07:00:20,405 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:00:20,407 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:00:20,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:453210x0, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T07:00:20,411 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45321-0x10105d3e7790001, quorum=127.0.0.1:50876, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:00:20,411 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45321-0x10105d3e7790001 connected 2024-11-11T07:00:20,412 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T07:00:20,412 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T07:00:20,413 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45321-0x10105d3e7790001, quorum=127.0.0.1:50876, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T07:00:20,414 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45321-0x10105d3e7790001, quorum=127.0.0.1:50876, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T07:00:20,414 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45321 2024-11-11T07:00:20,416 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45321 2024-11-11T07:00:20,416 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45321 2024-11-11T07:00:20,416 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45321 2024-11-11T07:00:20,417 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45321 2024-11-11T07:00:20,430 DEBUG [M:0;f334e8b0fbdf:40641 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;f334e8b0fbdf:40641 2024-11-11T07:00:20,430 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/f334e8b0fbdf,40641,1731308420347 2024-11-11T07:00:20,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45321-0x10105d3e7790001, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T07:00:20,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T07:00:20,432 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/f334e8b0fbdf,40641,1731308420347 2024-11-11T07:00:20,436 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45321-0x10105d3e7790001, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T07:00:20,436 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:20,436 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45321-0x10105d3e7790001, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:20,436 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T07:00:20,437 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/f334e8b0fbdf,40641,1731308420347 from backup master directory 2024-11-11T07:00:20,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/f334e8b0fbdf,40641,1731308420347 2024-11-11T07:00:20,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45321-0x10105d3e7790001, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T07:00:20,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T07:00:20,438 WARN [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by 
start scripts (Longer MTTR!) 2024-11-11T07:00:20,438 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=f334e8b0fbdf,40641,1731308420347 2024-11-11T07:00:20,443 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/hbase.id] with ID: 5de8390b-13af-41ec-b389-cd5b1ddfd720 2024-11-11T07:00:20,443 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/.tmp/hbase.id 2024-11-11T07:00:20,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41209 is added to blk_1073741826_1002 (size=42) 2024-11-11T07:00:20,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39455 is added to blk_1073741826_1002 (size=42) 2024-11-11T07:00:20,450 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/.tmp/hbase.id]:[hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/hbase.id] 2024-11-11T07:00:20,463 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:00:20,463 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T07:00:20,464 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
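The FSUtils entries just above show the cluster ID being created safely: the ID is first written to a temporary file (.tmp/hbase.id) and then moved to its final location (hbase.id), so readers never observe a half-written file. A minimal sketch of that write-then-rename pattern with the Hadoop FileSystem API follows; the paths and method name are placeholders, not the real FSUtils code.

    // Illustrative sketch of the write-to-temp-then-rename pattern described above.
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
      static void writeClusterId(Configuration conf, String clusterId) throws Exception {
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/hbase/.tmp/hbase.id");   // hypothetical temporary location
        Path dst = new Path("/hbase/hbase.id");        // hypothetical final location
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));  // write the ID to the temp file
        }
        if (!fs.rename(tmp, dst)) {                    // the "move to its target location" step
          throw new IllegalStateException("Failed to move " + tmp + " to " + dst);
        }
      }
    }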
2024-11-11T07:00:20,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:20,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45321-0x10105d3e7790001, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:20,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41209 is added to blk_1073741827_1003 (size=196) 2024-11-11T07:00:20,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39455 is added to blk_1073741827_1003 (size=196) 2024-11-11T07:00:20,474 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T07:00:20,475 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T07:00:20,475 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T07:00:20,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39455 is added to blk_1073741828_1004 (size=1189) 2024-11-11T07:00:20,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41209 is added to blk_1073741828_1004 (size=1189) 2024-11-11T07:00:20,484 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store 2024-11-11T07:00:20,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41209 is added to blk_1073741829_1005 (size=34) 2024-11-11T07:00:20,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39455 is added to blk_1073741829_1005 (size=34) 2024-11-11T07:00:20,492 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:00:20,492 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T07:00:20,492 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:00:20,492 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:00:20,492 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T07:00:20,492 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:00:20,492 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
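The descriptor dumped above is the schema of the local 'master:store' region: an in-memory 'info' family keeping 3 versions with ROWCOL bloom filters and ROW_INDEX_V1 block encoding, plus single-version 'proc', 'rs' and 'state' families. For reference, a hedged sketch of how an equivalent descriptor could be assembled with the public HBase client API is shown below; only the 'info' and 'proc' families are spelled out, and this is not the code HBase uses internally to define master:store.

    // Illustrative sketch: building a table descriptor shaped like the one logged above.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)                                    // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW)                    // BLOOMFILTER => 'ROW'
                .build())
            .build();
      }
    }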
2024-11-11T07:00:20,492 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731308420492Disabling compacts and flushes for region at 1731308420492Disabling writes for close at 1731308420492Writing region close event to WAL at 1731308420492Closed at 1731308420492 2024-11-11T07:00:20,493 WARN [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store/.initializing 2024-11-11T07:00:20,493 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/WALs/f334e8b0fbdf,40641,1731308420347 2024-11-11T07:00:20,496 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f334e8b0fbdf%2C40641%2C1731308420347, suffix=, logDir=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/WALs/f334e8b0fbdf,40641,1731308420347, archiveDir=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/oldWALs, maxLogs=10 2024-11-11T07:00:20,497 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C40641%2C1731308420347.1731308420497 2024-11-11T07:00:20,502 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/WALs/f334e8b0fbdf,40641,1731308420347/f334e8b0fbdf%2C40641%2C1731308420347.1731308420497 2024-11-11T07:00:20,503 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43437:43437),(127.0.0.1/127.0.0.1:44725:44725)] 2024-11-11T07:00:20,504 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T07:00:20,504 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:00:20,504 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:00:20,504 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:00:20,506 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:00:20,507 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T07:00:20,508 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:20,508 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:00:20,508 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:00:20,509 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T07:00:20,510 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:20,510 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T07:00:20,510 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:00:20,511 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T07:00:20,511 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:20,512 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T07:00:20,512 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:00:20,513 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T07:00:20,513 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:20,513 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T07:00:20,514 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:00:20,514 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:00:20,515 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:00:20,516 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:00:20,516 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:00:20,517 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T07:00:20,518 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:00:20,521 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T07:00:20,521 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=705076, jitterRate=-0.10344982147216797}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T07:00:20,522 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731308420505Initializing all the Stores at 1731308420505Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308420506 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308420506Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308420506Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308420506Cleaning up temporary data from old regions at 1731308420516 (+10 ms)Region opened successfully at 1731308420522 (+6 ms) 2024-11-11T07:00:20,522 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T07:00:20,526 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21c111f1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f334e8b0fbdf/172.17.0.2:0 2024-11-11T07:00:20,527 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T07:00:20,527 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T07:00:20,527 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T07:00:20,527 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T07:00:20,528 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T07:00:20,528 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T07:00:20,528 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T07:00:20,530 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T07:00:20,531 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T07:00:20,532 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T07:00:20,532 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T07:00:20,533 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T07:00:20,535 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T07:00:20,535 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T07:00:20,536 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T07:00:20,537 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T07:00:20,538 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T07:00:20,539 DEBUG 
[master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T07:00:20,541 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T07:00:20,542 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T07:00:20,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T07:00:20,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45321-0x10105d3e7790001, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T07:00:20,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:20,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45321-0x10105d3e7790001, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:20,546 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=f334e8b0fbdf,40641,1731308420347, sessionid=0x10105d3e7790000, setting cluster-up flag (Was=false) 2024-11-11T07:00:20,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:20,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45321-0x10105d3e7790001, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:20,554 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T07:00:20,555 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f334e8b0fbdf,40641,1731308420347 2024-11-11T07:00:20,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:20,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45321-0x10105d3e7790001, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:20,563 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T07:00:20,564 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f334e8b0fbdf,40641,1731308420347 2024-11-11T07:00:20,565 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T07:00:20,567 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T07:00:20,568 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T07:00:20,568 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T07:00:20,568 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: f334e8b0fbdf,40641,1731308420347 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T07:00:20,569 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T07:00:20,569 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T07:00:20,569 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T07:00:20,569 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T07:00:20,569 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/f334e8b0fbdf:0, corePoolSize=10, maxPoolSize=10 2024-11-11T07:00:20,569 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:20,569 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=2, maxPoolSize=2 2024-11-11T07:00:20,569 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T07:00:20,570 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731308450570 2024-11-11T07:00:20,570 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T07:00:20,570 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T07:00:20,571 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T07:00:20,571 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T07:00:20,571 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T07:00:20,571 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T07:00:20,571 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:20,571 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T07:00:20,571 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T07:00:20,571 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T07:00:20,571 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T07:00:20,571 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T07:00:20,572 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T07:00:20,572 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T07:00:20,572 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.large.0-1731308420572,5,FailOnTimeoutGroup] 2024-11-11T07:00:20,572 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.small.0-1731308420572,5,FailOnTimeoutGroup] 2024-11-11T07:00:20,572 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:20,572 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T07:00:20,572 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:20,572 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:20,573 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:20,573 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T07:00:20,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41209 is added to blk_1073741831_1007 (size=1321) 2024-11-11T07:00:20,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39455 is added to blk_1073741831_1007 (size=1321) 2024-11-11T07:00:20,582 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T07:00:20,582 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842 2024-11-11T07:00:20,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41209 is added to blk_1073741832_1008 (size=32) 2024-11-11T07:00:20,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39455 is added to blk_1073741832_1008 (size=32) 2024-11-11T07:00:20,590 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:00:20,591 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T07:00:20,593 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T07:00:20,593 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:20,593 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:00:20,593 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T07:00:20,595 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T07:00:20,595 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:20,595 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:00:20,595 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T07:00:20,597 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T07:00:20,597 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:20,597 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:00:20,597 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T07:00:20,599 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T07:00:20,599 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:20,599 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:00:20,599 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T07:00:20,600 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/hbase/meta/1588230740 2024-11-11T07:00:20,600 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/hbase/meta/1588230740 2024-11-11T07:00:20,601 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T07:00:20,602 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T07:00:20,602 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-11T07:00:20,603 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T07:00:20,605 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T07:00:20,606 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=744855, jitterRate=-0.05286867916584015}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T07:00:20,607 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731308420590Initializing all the Stores at 1731308420591 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308420591Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308420591Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308420591Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308420591Cleaning up temporary data from old regions at 1731308420602 (+11 ms)Region opened successfully at 1731308420606 (+4 ms) 2024-11-11T07:00:20,607 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T07:00:20,607 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T07:00:20,607 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T07:00:20,607 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T07:00:20,607 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T07:00:20,607 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T07:00:20,608 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731308420607Disabling compacts and flushes for region at 1731308420607Disabling writes for close at 1731308420607Writing region 
close event to WAL at 1731308420607Closed at 1731308420607 2024-11-11T07:00:20,609 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T07:00:20,609 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T07:00:20,609 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T07:00:20,611 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T07:00:20,612 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T07:00:20,618 INFO [RS:0;f334e8b0fbdf:45321 {}] regionserver.HRegionServer(746): ClusterId : 5de8390b-13af-41ec-b389-cd5b1ddfd720 2024-11-11T07:00:20,618 DEBUG [RS:0;f334e8b0fbdf:45321 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T07:00:20,621 DEBUG [RS:0;f334e8b0fbdf:45321 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T07:00:20,621 DEBUG [RS:0;f334e8b0fbdf:45321 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T07:00:20,625 DEBUG [RS:0;f334e8b0fbdf:45321 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T07:00:20,625 DEBUG [RS:0;f334e8b0fbdf:45321 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4eb4bc50, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f334e8b0fbdf/172.17.0.2:0 2024-11-11T07:00:20,638 DEBUG [RS:0;f334e8b0fbdf:45321 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;f334e8b0fbdf:45321 2024-11-11T07:00:20,638 INFO [RS:0;f334e8b0fbdf:45321 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T07:00:20,638 INFO [RS:0;f334e8b0fbdf:45321 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T07:00:20,638 DEBUG [RS:0;f334e8b0fbdf:45321 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-11T07:00:20,639 INFO [RS:0;f334e8b0fbdf:45321 {}] regionserver.HRegionServer(2659): reportForDuty to master=f334e8b0fbdf,40641,1731308420347 with port=45321, startcode=1731308420401 2024-11-11T07:00:20,639 DEBUG [RS:0;f334e8b0fbdf:45321 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T07:00:20,641 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36191, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T07:00:20,642 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40641 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f334e8b0fbdf,45321,1731308420401 2024-11-11T07:00:20,642 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40641 {}] master.ServerManager(517): Registering regionserver=f334e8b0fbdf,45321,1731308420401 2024-11-11T07:00:20,644 DEBUG [RS:0;f334e8b0fbdf:45321 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842 2024-11-11T07:00:20,644 DEBUG [RS:0;f334e8b0fbdf:45321 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41533 2024-11-11T07:00:20,644 DEBUG [RS:0;f334e8b0fbdf:45321 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T07:00:20,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T07:00:20,646 DEBUG [RS:0;f334e8b0fbdf:45321 {}] zookeeper.ZKUtil(111): regionserver:45321-0x10105d3e7790001, quorum=127.0.0.1:50876, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f334e8b0fbdf,45321,1731308420401 2024-11-11T07:00:20,646 WARN [RS:0;f334e8b0fbdf:45321 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T07:00:20,646 INFO [RS:0;f334e8b0fbdf:45321 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T07:00:20,647 DEBUG [RS:0;f334e8b0fbdf:45321 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401 2024-11-11T07:00:20,647 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f334e8b0fbdf,45321,1731308420401] 2024-11-11T07:00:20,650 INFO [RS:0;f334e8b0fbdf:45321 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T07:00:20,652 INFO [RS:0;f334e8b0fbdf:45321 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T07:00:20,653 INFO [RS:0;f334e8b0fbdf:45321 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T07:00:20,653 INFO [RS:0;f334e8b0fbdf:45321 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-11T07:00:20,653 INFO [RS:0;f334e8b0fbdf:45321 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T07:00:20,654 INFO [RS:0;f334e8b0fbdf:45321 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T07:00:20,654 INFO [RS:0;f334e8b0fbdf:45321 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:20,654 DEBUG [RS:0;f334e8b0fbdf:45321 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:20,654 DEBUG [RS:0;f334e8b0fbdf:45321 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:20,654 DEBUG [RS:0;f334e8b0fbdf:45321 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:20,654 DEBUG [RS:0;f334e8b0fbdf:45321 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:20,654 DEBUG [RS:0;f334e8b0fbdf:45321 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:20,654 DEBUG [RS:0;f334e8b0fbdf:45321 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f334e8b0fbdf:0, corePoolSize=2, maxPoolSize=2 2024-11-11T07:00:20,654 DEBUG [RS:0;f334e8b0fbdf:45321 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:20,654 DEBUG [RS:0;f334e8b0fbdf:45321 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:20,654 DEBUG [RS:0;f334e8b0fbdf:45321 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:20,654 DEBUG [RS:0;f334e8b0fbdf:45321 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:20,654 DEBUG [RS:0;f334e8b0fbdf:45321 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:20,655 DEBUG [RS:0;f334e8b0fbdf:45321 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:20,655 DEBUG [RS:0;f334e8b0fbdf:45321 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f334e8b0fbdf:0, corePoolSize=3, maxPoolSize=3 2024-11-11T07:00:20,655 DEBUG [RS:0;f334e8b0fbdf:45321 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0, corePoolSize=3, maxPoolSize=3 2024-11-11T07:00:20,655 INFO [RS:0;f334e8b0fbdf:45321 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-11T07:00:20,656 INFO [RS:0;f334e8b0fbdf:45321 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:20,656 INFO [RS:0;f334e8b0fbdf:45321 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:20,656 INFO [RS:0;f334e8b0fbdf:45321 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:20,656 INFO [RS:0;f334e8b0fbdf:45321 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:20,656 INFO [RS:0;f334e8b0fbdf:45321 {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,45321,1731308420401-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T07:00:20,672 INFO [RS:0;f334e8b0fbdf:45321 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T07:00:20,672 INFO [RS:0;f334e8b0fbdf:45321 {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,45321,1731308420401-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:20,672 INFO [RS:0;f334e8b0fbdf:45321 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:20,672 INFO [RS:0;f334e8b0fbdf:45321 {}] regionserver.Replication(171): f334e8b0fbdf,45321,1731308420401 started 2024-11-11T07:00:20,718 INFO [RS:0;f334e8b0fbdf:45321 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:20,718 INFO [RS:0;f334e8b0fbdf:45321 {}] regionserver.HRegionServer(1482): Serving as f334e8b0fbdf,45321,1731308420401, RpcServer on f334e8b0fbdf/172.17.0.2:45321, sessionid=0x10105d3e7790001 2024-11-11T07:00:20,718 DEBUG [RS:0;f334e8b0fbdf:45321 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T07:00:20,719 DEBUG [RS:0;f334e8b0fbdf:45321 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f334e8b0fbdf,45321,1731308420401 2024-11-11T07:00:20,719 DEBUG [RS:0;f334e8b0fbdf:45321 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f334e8b0fbdf,45321,1731308420401' 2024-11-11T07:00:20,719 DEBUG [RS:0;f334e8b0fbdf:45321 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T07:00:20,719 DEBUG [RS:0;f334e8b0fbdf:45321 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T07:00:20,720 DEBUG [RS:0;f334e8b0fbdf:45321 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T07:00:20,720 DEBUG [RS:0;f334e8b0fbdf:45321 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T07:00:20,720 DEBUG [RS:0;f334e8b0fbdf:45321 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager f334e8b0fbdf,45321,1731308420401 2024-11-11T07:00:20,720 DEBUG [RS:0;f334e8b0fbdf:45321 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f334e8b0fbdf,45321,1731308420401' 2024-11-11T07:00:20,720 DEBUG [RS:0;f334e8b0fbdf:45321 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T07:00:20,721 DEBUG 
[RS:0;f334e8b0fbdf:45321 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T07:00:20,721 DEBUG [RS:0;f334e8b0fbdf:45321 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T07:00:20,721 INFO [RS:0;f334e8b0fbdf:45321 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T07:00:20,721 INFO [RS:0;f334e8b0fbdf:45321 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T07:00:20,762 WARN [f334e8b0fbdf:40641 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-11T07:00:20,824 INFO [RS:0;f334e8b0fbdf:45321 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f334e8b0fbdf%2C45321%2C1731308420401, suffix=, logDir=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401, archiveDir=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/oldWALs, maxLogs=32 2024-11-11T07:00:20,825 INFO [RS:0;f334e8b0fbdf:45321 {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 2024-11-11T07:00:20,832 INFO [RS:0;f334e8b0fbdf:45321 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 2024-11-11T07:00:20,838 DEBUG [RS:0;f334e8b0fbdf:45321 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43437:43437),(127.0.0.1/127.0.0.1:44725:44725)] 2024-11-11T07:00:21,012 DEBUG [f334e8b0fbdf:40641 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T07:00:21,013 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=f334e8b0fbdf,45321,1731308420401 2024-11-11T07:00:21,015 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f334e8b0fbdf,45321,1731308420401, state=OPENING 2024-11-11T07:00:21,016 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T07:00:21,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:21,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45321-0x10105d3e7790001, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:21,019 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T07:00:21,019 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T07:00:21,019 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T07:00:21,019 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=f334e8b0fbdf,45321,1731308420401}] 2024-11-11T07:00:21,173 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T07:00:21,175 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58639, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T07:00:21,179 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T07:00:21,179 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T07:00:21,181 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f334e8b0fbdf%2C45321%2C1731308420401.meta, suffix=.meta, logDir=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401, archiveDir=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/oldWALs, maxLogs=32 2024-11-11T07:00:21,182 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta 2024-11-11T07:00:21,187 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta 2024-11-11T07:00:21,188 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43437:43437),(127.0.0.1/127.0.0.1:44725:44725)] 2024-11-11T07:00:21,189 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T07:00:21,189 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T07:00:21,189 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T07:00:21,189 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-11T07:00:21,190 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T07:00:21,190 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:00:21,190 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T07:00:21,190 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T07:00:21,192 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T07:00:21,193 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T07:00:21,193 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:21,194 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:00:21,194 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T07:00:21,195 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T07:00:21,195 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:21,195 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:00:21,195 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T07:00:21,196 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T07:00:21,196 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:21,197 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:00:21,197 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T07:00:21,198 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T07:00:21,198 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:21,198 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-11T07:00:21,199 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T07:00:21,199 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/hbase/meta/1588230740 2024-11-11T07:00:21,200 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/hbase/meta/1588230740 2024-11-11T07:00:21,202 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T07:00:21,202 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T07:00:21,202 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T07:00:21,203 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T07:00:21,204 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=826710, jitterRate=0.05121631920337677}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T07:00:21,204 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T07:00:21,205 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731308421190Writing region info on filesystem at 1731308421190Initializing all the Stores at 1731308421191 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308421191Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308421191Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308421191Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308421191Cleaning up temporary data from old regions at 1731308421202 (+11 ms)Running coprocessor post-open hooks at 1731308421204 (+2 ms)Region opened successfully at 1731308421205 (+1 ms) 2024-11-11T07:00:21,206 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731308421172 2024-11-11T07:00:21,209 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T07:00:21,209 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T07:00:21,210 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=f334e8b0fbdf,45321,1731308420401 2024-11-11T07:00:21,211 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f334e8b0fbdf,45321,1731308420401, state=OPEN 2024-11-11T07:00:21,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45321-0x10105d3e7790001, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T07:00:21,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T07:00:21,217 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=f334e8b0fbdf,45321,1731308420401 2024-11-11T07:00:21,217 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T07:00:21,217 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T07:00:21,220 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T07:00:21,220 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=f334e8b0fbdf,45321,1731308420401 in 198 msec 2024-11-11T07:00:21,222 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T07:00:21,222 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 611 msec 2024-11-11T07:00:21,223 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T07:00:21,223 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T07:00:21,225 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T07:00:21,225 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f334e8b0fbdf,45321,1731308420401, seqNum=-1] 2024-11-11T07:00:21,225 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T07:00:21,227 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34133, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T07:00:21,233 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 665 msec 2024-11-11T07:00:21,233 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731308421233, completionTime=-1 2024-11-11T07:00:21,233 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T07:00:21,233 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-11T07:00:21,236 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-11T07:00:21,236 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731308481236 2024-11-11T07:00:21,236 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731308541236 2024-11-11T07:00:21,236 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-11T07:00:21,237 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,40641,1731308420347-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:21,237 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,40641,1731308420347-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:21,237 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,40641,1731308420347-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:21,237 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-f334e8b0fbdf:40641, period=300000, unit=MILLISECONDS is enabled. 
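The records above show InitMetaProcedure creating the bootstrap 'default' and 'hbase' namespaces and a client resolving the hbase:meta location through the connection registry. As an illustrative sketch only (not part of this test run; the class name and connection setup below are assumptions), the same two namespaces can be listed through the public Admin API once the master has reported in:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListBootstrapNamespaces {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // After InitMetaProcedure finishes, the two bootstrap namespaces should exist.
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName()); // expected to include "default" and "hbase"
      }
    }
  }
}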
2024-11-11T07:00:21,237 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:21,237 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:21,239 DEBUG [master/f334e8b0fbdf:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T07:00:21,242 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.803sec 2024-11-11T07:00:21,242 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T07:00:21,242 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T07:00:21,242 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T07:00:21,242 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T07:00:21,242 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T07:00:21,242 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,40641,1731308420347-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T07:00:21,242 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,40641,1731308420347-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T07:00:21,245 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T07:00:21,245 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T07:00:21,245 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,40641,1731308420347-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
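The master above logs "Quota support disabled" and skips the slow-log and WAL-event-tracker system tables. A minimal sketch, assuming a test that wants quotas switched on; the key below is the standard switch read by MasterQuotaManager, and everything else in the snippet is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class EnableQuotas {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Quotas are off by default, which is why the master logs "Quota support disabled";
    // flipping this switch before the cluster starts enables the quota subsystem.
    conf.setBoolean("hbase.quota.enabled", true);
    System.out.println("hbase.quota.enabled=" + conf.getBoolean("hbase.quota.enabled", false));
  }
}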
2024-11-11T07:00:21,319 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3efce601, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T07:00:21,319 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request f334e8b0fbdf,40641,-1 for getting cluster id 2024-11-11T07:00:21,319 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T07:00:21,321 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5de8390b-13af-41ec-b389-cd5b1ddfd720' 2024-11-11T07:00:21,321 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T07:00:21,322 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5de8390b-13af-41ec-b389-cd5b1ddfd720" 2024-11-11T07:00:21,322 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65e8844b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T07:00:21,322 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f334e8b0fbdf,40641,-1] 2024-11-11T07:00:21,322 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T07:00:21,322 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:00:21,324 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53732, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T07:00:21,325 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67c279b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T07:00:21,325 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T07:00:21,326 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f334e8b0fbdf,45321,1731308420401, seqNum=-1] 2024-11-11T07:00:21,326 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T07:00:21,328 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54084, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T07:00:21,329 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=f334e8b0fbdf,40641,1731308420347 2024-11-11T07:00:21,330 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:00:21,332 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T07:00:21,348 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f334e8b0fbdf:0 server-side Connection retries=45 2024-11-11T07:00:21,348 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:00:21,348 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T07:00:21,348 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T07:00:21,348 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:00:21,348 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T07:00:21,348 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T07:00:21,348 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T07:00:21,349 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42847 2024-11-11T07:00:21,350 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42847 connecting to ZooKeeper ensemble=127.0.0.1:50876 2024-11-11T07:00:21,351 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:00:21,352 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:00:21,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:428470x0, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T07:00:21,357 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:42847-0x10105d3e7790002, quorum=127.0.0.1:50876, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-11T07:00:21,357 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42847-0x10105d3e7790002 connected 2024-11-11T07:00:21,357 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-11T07:00:21,357 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T07:00:21,358 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
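The second region server is being constructed above with deliberately small RPC executors (handlerCount=3, maxQueueLength=30), an 880 MB block cache, and a ZooKeeper ensemble at 127.0.0.1:50876. A sketch of the standard configuration keys behind those numbers; the values are copied from the log, the surrounding class is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallRegionServerConf {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");           // ensemble host seen in the log
    conf.setInt("hbase.zookeeper.property.clientPort", 50876); // ephemeral test port from the log
    conf.setInt("hbase.regionserver.handler.count", 3);        // matches handlerCount=3 above
    conf.setFloat("hfile.block.cache.size", 0.4f);             // fraction of heap given to the block cache
    System.out.println(conf.get("hbase.zookeeper.quorum"));
  }
}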
2024-11-11T07:00:21,359 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:42847-0x10105d3e7790002, quorum=127.0.0.1:50876, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T07:00:21,360 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42847-0x10105d3e7790002, quorum=127.0.0.1:50876, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T07:00:21,360 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42847 2024-11-11T07:00:21,360 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42847 2024-11-11T07:00:21,361 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42847 2024-11-11T07:00:21,361 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42847 2024-11-11T07:00:21,361 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42847 2024-11-11T07:00:21,362 INFO [RS:1;f334e8b0fbdf:42847 {}] regionserver.HRegionServer(746): ClusterId : 5de8390b-13af-41ec-b389-cd5b1ddfd720 2024-11-11T07:00:21,362 DEBUG [RS:1;f334e8b0fbdf:42847 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T07:00:21,365 DEBUG [RS:1;f334e8b0fbdf:42847 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T07:00:21,365 DEBUG [RS:1;f334e8b0fbdf:42847 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T07:00:21,367 DEBUG [RS:1;f334e8b0fbdf:42847 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T07:00:21,367 DEBUG [RS:1;f334e8b0fbdf:42847 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34e94bff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f334e8b0fbdf/172.17.0.2:0 2024-11-11T07:00:21,379 DEBUG [RS:1;f334e8b0fbdf:42847 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;f334e8b0fbdf:42847 2024-11-11T07:00:21,379 INFO [RS:1;f334e8b0fbdf:42847 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T07:00:21,379 INFO [RS:1;f334e8b0fbdf:42847 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T07:00:21,379 DEBUG [RS:1;f334e8b0fbdf:42847 {}] regionserver.HRegionServer(832): About to register with Master. 
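Everything from here down is the second region server (RS:1) coming up and registering with the master. A sketch of how a test typically adds that extra server to a mini cluster; the method names follow the 2.x HBaseTestingUtility API and are an assumption for the HBaseTestingUtil class that appears in this log:

import org.apache.hadoop.hbase.HBaseTestingUtility;

public class AddRegionServer {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster(1);                       // master plus one region server
    util.getMiniHBaseCluster().startRegionServer(); // brings up a second RS, like RS:1 above
    util.shutdownMiniCluster();
  }
}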
2024-11-11T07:00:21,380 INFO [RS:1;f334e8b0fbdf:42847 {}] regionserver.HRegionServer(2659): reportForDuty to master=f334e8b0fbdf,40641,1731308420347 with port=42847, startcode=1731308421347 2024-11-11T07:00:21,380 DEBUG [RS:1;f334e8b0fbdf:42847 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T07:00:21,382 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54819, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T07:00:21,382 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40641 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f334e8b0fbdf,42847,1731308421347 2024-11-11T07:00:21,382 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40641 {}] master.ServerManager(517): Registering regionserver=f334e8b0fbdf,42847,1731308421347 2024-11-11T07:00:21,384 DEBUG [RS:1;f334e8b0fbdf:42847 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842 2024-11-11T07:00:21,384 DEBUG [RS:1;f334e8b0fbdf:42847 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41533 2024-11-11T07:00:21,384 DEBUG [RS:1;f334e8b0fbdf:42847 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T07:00:21,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T07:00:21,388 DEBUG [RS:1;f334e8b0fbdf:42847 {}] zookeeper.ZKUtil(111): regionserver:42847-0x10105d3e7790002, quorum=127.0.0.1:50876, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f334e8b0fbdf,42847,1731308421347 2024-11-11T07:00:21,388 WARN [RS:1;f334e8b0fbdf:42847 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T07:00:21,388 INFO [RS:1;f334e8b0fbdf:42847 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T07:00:21,388 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f334e8b0fbdf,42847,1731308421347] 2024-11-11T07:00:21,388 DEBUG [RS:1;f334e8b0fbdf:42847 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347 2024-11-11T07:00:21,392 INFO [RS:1;f334e8b0fbdf:42847 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T07:00:21,394 INFO [RS:1;f334e8b0fbdf:42847 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T07:00:21,394 INFO [RS:1;f334e8b0fbdf:42847 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T07:00:21,394 INFO [RS:1;f334e8b0fbdf:42847 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
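The compaction throughput controller above reports a 100 MB/s higher bound and a 50 MB/s lower bound. A small sketch of the configuration keys read by PressureAwareCompactionThroughputController, with the bounds taken from the log (key names to the best of my knowledge; the wrapper class is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputConf {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // 100 MB/s higher bound and 50 MB/s lower bound, as reported by the controller above.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    System.out.println(conf.get("hbase.hstore.compaction.throughput.higher.bound"));
  }
}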
2024-11-11T07:00:21,394 INFO [RS:1;f334e8b0fbdf:42847 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T07:00:21,395 INFO [RS:1;f334e8b0fbdf:42847 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T07:00:21,395 INFO [RS:1;f334e8b0fbdf:42847 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:21,395 DEBUG [RS:1;f334e8b0fbdf:42847 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:21,395 DEBUG [RS:1;f334e8b0fbdf:42847 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:21,395 DEBUG [RS:1;f334e8b0fbdf:42847 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:21,395 DEBUG [RS:1;f334e8b0fbdf:42847 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:21,395 DEBUG [RS:1;f334e8b0fbdf:42847 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:21,396 DEBUG [RS:1;f334e8b0fbdf:42847 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f334e8b0fbdf:0, corePoolSize=2, maxPoolSize=2 2024-11-11T07:00:21,396 DEBUG [RS:1;f334e8b0fbdf:42847 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:21,396 DEBUG [RS:1;f334e8b0fbdf:42847 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:21,396 DEBUG [RS:1;f334e8b0fbdf:42847 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:21,396 DEBUG [RS:1;f334e8b0fbdf:42847 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:21,396 DEBUG [RS:1;f334e8b0fbdf:42847 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:21,396 DEBUG [RS:1;f334e8b0fbdf:42847 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:00:21,396 DEBUG [RS:1;f334e8b0fbdf:42847 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f334e8b0fbdf:0, corePoolSize=3, maxPoolSize=3 2024-11-11T07:00:21,396 DEBUG [RS:1;f334e8b0fbdf:42847 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0, corePoolSize=3, maxPoolSize=3 2024-11-11T07:00:21,396 INFO [RS:1;f334e8b0fbdf:42847 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-11T07:00:21,396 INFO [RS:1;f334e8b0fbdf:42847 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:21,396 INFO [RS:1;f334e8b0fbdf:42847 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:21,396 INFO [RS:1;f334e8b0fbdf:42847 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:21,396 INFO [RS:1;f334e8b0fbdf:42847 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:21,396 INFO [RS:1;f334e8b0fbdf:42847 {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,42847,1731308421347-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T07:00:21,411 INFO [RS:1;f334e8b0fbdf:42847 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T07:00:21,411 INFO [RS:1;f334e8b0fbdf:42847 {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,42847,1731308421347-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:21,411 INFO [RS:1;f334e8b0fbdf:42847 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:21,411 INFO [RS:1;f334e8b0fbdf:42847 {}] regionserver.Replication(171): f334e8b0fbdf,42847,1731308421347 started 2024-11-11T07:00:21,425 INFO [RS:1;f334e8b0fbdf:42847 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:00:21,425 INFO [RS:1;f334e8b0fbdf:42847 {}] regionserver.HRegionServer(1482): Serving as f334e8b0fbdf,42847,1731308421347, RpcServer on f334e8b0fbdf/172.17.0.2:42847, sessionid=0x10105d3e7790002 2024-11-11T07:00:21,425 DEBUG [RS:1;f334e8b0fbdf:42847 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T07:00:21,425 DEBUG [RS:1;f334e8b0fbdf:42847 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f334e8b0fbdf,42847,1731308421347 2024-11-11T07:00:21,425 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;f334e8b0fbdf:42847,5,FailOnTimeoutGroup] 2024-11-11T07:00:21,425 DEBUG [RS:1;f334e8b0fbdf:42847 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f334e8b0fbdf,42847,1731308421347' 2024-11-11T07:00:21,425 DEBUG [RS:1;f334e8b0fbdf:42847 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T07:00:21,425 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-11T07:00:21,426 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T07:00:21,426 DEBUG [RS:1;f334e8b0fbdf:42847 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T07:00:21,426 DEBUG [RS:1;f334e8b0fbdf:42847 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T07:00:21,426 DEBUG [RS:1;f334e8b0fbdf:42847 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T07:00:21,426 DEBUG [RS:1;f334e8b0fbdf:42847 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
f334e8b0fbdf,42847,1731308421347 2024-11-11T07:00:21,426 DEBUG [RS:1;f334e8b0fbdf:42847 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f334e8b0fbdf,42847,1731308421347' 2024-11-11T07:00:21,426 DEBUG [RS:1;f334e8b0fbdf:42847 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T07:00:21,427 DEBUG [RS:1;f334e8b0fbdf:42847 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T07:00:21,427 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is f334e8b0fbdf,40641,1731308420347 2024-11-11T07:00:21,427 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1459431e 2024-11-11T07:00:21,427 DEBUG [RS:1;f334e8b0fbdf:42847 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T07:00:21,427 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T07:00:21,427 INFO [RS:1;f334e8b0fbdf:42847 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T07:00:21,427 INFO [RS:1;f334e8b0fbdf:42847 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T07:00:21,429 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53736, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T07:00:21,429 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40641 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-11T07:00:21,429 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40641 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
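The two warnings above come from TableDescriptorChecker: the test deliberately creates a table with MAX_FILESIZE=786432 and MEMSTORE_FLUSHSIZE=8192, far below sane production values, so that flushes and log rolls happen quickly. A sketch of how such a descriptor is built and submitted; the table and family names mirror the log, and the sanity-check override is the usual way tests keep the master from rejecting such values outright:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTinyFlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.table.sanity.checks", false); // with checks on, the tiny sizes are rejected
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setMaxFileSize(786432L)      // MAX_FILESIZE from the warning above
        .setMemStoreFlushSize(8192L)  // MEMSTORE_FLUSHSIZE from the warning above
        .build();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      admin.createTable(td); // drives the CreateTableProcedure (pid=4) seen below
    }
  }
}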
2024-11-11T07:00:21,430 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40641 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T07:00:21,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40641 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-11T07:00:21,433 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T07:00:21,433 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:21,433 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40641 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-11T07:00:21,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40641 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T07:00:21,434 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T07:00:21,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41209 is added to blk_1073741835_1011 (size=393) 2024-11-11T07:00:21,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39455 is added to blk_1073741835_1011 (size=393) 2024-11-11T07:00:21,443 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b2b5aa4a37c19b1f701e750c2db983e3, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842 2024-11-11T07:00:21,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41209 is added to blk_1073741836_1012 (size=76) 2024-11-11T07:00:21,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39455 is added to blk_1073741836_1012 (size=76) 2024-11-11T07:00:21,451 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:00:21,451 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing b2b5aa4a37c19b1f701e750c2db983e3, disabling compactions & flushes 2024-11-11T07:00:21,451 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3. 2024-11-11T07:00:21,451 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3. 2024-11-11T07:00:21,451 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3. after waiting 0 ms 2024-11-11T07:00:21,451 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3. 2024-11-11T07:00:21,451 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3. 2024-11-11T07:00:21,451 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for b2b5aa4a37c19b1f701e750c2db983e3: Waiting for close lock at 1731308421451Disabling compacts and flushes for region at 1731308421451Disabling writes for close at 1731308421451Writing region close event to WAL at 1731308421451Closed at 1731308421451 2024-11-11T07:00:21,453 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T07:00:21,453 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731308421453"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731308421453"}]},"ts":"1731308421453"} 2024-11-11T07:00:21,455 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-11T07:00:21,456 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T07:00:21,457 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731308421457"}]},"ts":"1731308421457"} 2024-11-11T07:00:21,459 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-11T07:00:21,459 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b2b5aa4a37c19b1f701e750c2db983e3, ASSIGN}] 2024-11-11T07:00:21,460 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b2b5aa4a37c19b1f701e750c2db983e3, ASSIGN 2024-11-11T07:00:21,461 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b2b5aa4a37c19b1f701e750c2db983e3, ASSIGN; state=OFFLINE, location=f334e8b0fbdf,45321,1731308420401; forceNewPlan=false, retain=false 2024-11-11T07:00:21,529 INFO [RS:1;f334e8b0fbdf:42847 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f334e8b0fbdf%2C42847%2C1731308421347, suffix=, logDir=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347, archiveDir=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/oldWALs, maxLogs=32 2024-11-11T07:00:21,530 INFO [RS:1;f334e8b0fbdf:42847 {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 2024-11-11T07:00:21,536 INFO [RS:1;f334e8b0fbdf:42847 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 2024-11-11T07:00:21,537 DEBUG [RS:1;f334e8b0fbdf:42847 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43437:43437),(127.0.0.1/127.0.0.1:44725:44725)] 2024-11-11T07:00:21,605 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-11T07:00:21,612 INFO [f334e8b0fbdf:40641 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
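RS:1's WAL is created above with blocksize=256 MB, rollsize=128 MB and maxLogs=32; the roll size is the block size times a multiplier. A sketch with the logged values plugged into the stock configuration keys (the wrapper class is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollingConf {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // WAL block size (256 MB above)
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // roll at 50%, i.e. the 128 MB rollsize
    conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs=32 above
    System.out.println(conf.get("hbase.regionserver.maxlogs"));
  }
}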
2024-11-11T07:00:21,613 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b2b5aa4a37c19b1f701e750c2db983e3, regionState=OPENING, regionLocation=f334e8b0fbdf,45321,1731308420401 2024-11-11T07:00:21,615 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b2b5aa4a37c19b1f701e750c2db983e3, ASSIGN because future has completed 2024-11-11T07:00:21,616 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b2b5aa4a37c19b1f701e750c2db983e3, server=f334e8b0fbdf,45321,1731308420401}] 2024-11-11T07:00:21,664 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:00:21,668 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:00:21,773 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3. 2024-11-11T07:00:21,773 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => b2b5aa4a37c19b1f701e750c2db983e3, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3.', STARTKEY => '', ENDKEY => ''} 2024-11-11T07:00:21,774 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath b2b5aa4a37c19b1f701e750c2db983e3 2024-11-11T07:00:21,774 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:00:21,774 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for b2b5aa4a37c19b1f701e750c2db983e3 2024-11-11T07:00:21,774 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for b2b5aa4a37c19b1f701e750c2db983e3 2024-11-11T07:00:21,776 INFO [StoreOpener-b2b5aa4a37c19b1f701e750c2db983e3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b2b5aa4a37c19b1f701e750c2db983e3 2024-11-11T07:00:21,777 INFO [StoreOpener-b2b5aa4a37c19b1f701e750c2db983e3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b2b5aa4a37c19b1f701e750c2db983e3 columnFamilyName info 2024-11-11T07:00:21,777 DEBUG [StoreOpener-b2b5aa4a37c19b1f701e750c2db983e3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:00:21,777 INFO [StoreOpener-b2b5aa4a37c19b1f701e750c2db983e3-1 {}] regionserver.HStore(327): Store=b2b5aa4a37c19b1f701e750c2db983e3/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T07:00:21,778 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for b2b5aa4a37c19b1f701e750c2db983e3 2024-11-11T07:00:21,778 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3 2024-11-11T07:00:21,779 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3 2024-11-11T07:00:21,779 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for b2b5aa4a37c19b1f701e750c2db983e3 2024-11-11T07:00:21,779 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for b2b5aa4a37c19b1f701e750c2db983e3 2024-11-11T07:00:21,781 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for b2b5aa4a37c19b1f701e750c2db983e3 2024-11-11T07:00:21,783 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T07:00:21,783 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened b2b5aa4a37c19b1f701e750c2db983e3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=777647, jitterRate=-0.011171907186508179}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T07:00:21,783 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b2b5aa4a37c19b1f701e750c2db983e3 2024-11-11T07:00:21,784 DEBUG 
[RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for b2b5aa4a37c19b1f701e750c2db983e3: Running coprocessor pre-open hook at 1731308421774Writing region info on filesystem at 1731308421774Initializing all the Stores at 1731308421775 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308421775Cleaning up temporary data from old regions at 1731308421779 (+4 ms)Running coprocessor post-open hooks at 1731308421783 (+4 ms)Region opened successfully at 1731308421784 (+1 ms) 2024-11-11T07:00:21,785 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3., pid=6, masterSystemTime=1731308421769 2024-11-11T07:00:21,787 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3. 2024-11-11T07:00:21,787 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3. 2024-11-11T07:00:21,788 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b2b5aa4a37c19b1f701e750c2db983e3, regionState=OPEN, openSeqNum=2, regionLocation=f334e8b0fbdf,45321,1731308420401 2024-11-11T07:00:21,790 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b2b5aa4a37c19b1f701e750c2db983e3, server=f334e8b0fbdf,45321,1731308420401 because future has completed 2024-11-11T07:00:21,795 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T07:00:21,795 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure b2b5aa4a37c19b1f701e750c2db983e3, server=f334e8b0fbdf,45321,1731308420401 in 176 msec 2024-11-11T07:00:21,797 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T07:00:21,798 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b2b5aa4a37c19b1f701e750c2db983e3, ASSIGN in 336 msec 2024-11-11T07:00:21,799 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T07:00:21,799 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731308421799"}]},"ts":"1731308421799"} 
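Once the CreateTableProcedure chain (pid=4/5/6) finishes below, the client's "Checking to see if procedure is done pid=4" poll returns and the test confirms the table has exactly one region. A sketch of that verification through the public Admin API (connection setup is illustrative; the table name is the one from this run):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class VerifySingleRegion {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      for (RegionInfo region : admin.getRegions(table)) {
        // The log reports "Found 1 regions for table"; in this run the encoded name
        // was b2b5aa4a37c19b1f701e750c2db983e3.
        System.out.println(region.getEncodedName());
      }
    }
  }
}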
2024-11-11T07:00:21,801 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-11T07:00:21,802 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T07:00:21,804 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 372 msec 2024-11-11T07:00:22,179 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T07:00:22,181 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:00:22,199 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:00:22,200 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:00:22,200 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:00:27,108 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T07:00:27,110 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:00:27,126 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:00:27,126 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:00:27,127 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:00:27,134 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-11T07:00:27,135 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-11T07:00:31,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40641 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T07:00:31,475 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-11T07:00:31,475 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-11T07:00:31,478 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-11T07:00:31,479 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3. 2024-11-11T07:00:31,491 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:00:31,494 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T07:00:31,495 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T07:00:31,495 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T07:00:31,495 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T07:00:31,496 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75255721{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/hadoop.log.dir/,AVAILABLE} 2024-11-11T07:00:31,496 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@132d95f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T07:00:31,605 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-11T07:00:31,605 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-11T07:00:31,611 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@65266587{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/java.io.tmpdir/jetty-localhost-42577-hadoop-hdfs-3_4_1-tests_jar-_-any-7451044572758945729/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:00:31,611 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2e157477{HTTP/1.1, (http/1.1)}{localhost:42577} 2024-11-11T07:00:31,611 INFO [Time-limited test {}] server.Server(415): Started @115191ms 2024-11-11T07:00:31,613 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T07:00:31,648 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:00:31,651 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T07:00:31,652 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T07:00:31,652 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T07:00:31,652 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T07:00:31,653 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fbc343d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/hadoop.log.dir/,AVAILABLE} 2024-11-11T07:00:31,653 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3986ff43{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T07:00:31,716 WARN [Thread-826 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data5/current/BP-1526976113-172.17.0.2-1731308419534/current, will proceed with Du for space computation calculation, 2024-11-11T07:00:31,716 WARN [Thread-827 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data6/current/BP-1526976113-172.17.0.2-1731308419534/current, will proceed with Du for space computation calculation, 2024-11-11T07:00:31,732 WARN [Thread-806 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T07:00:31,734 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb7c39a21028ce9db with lease ID 0x8bd315100e7fffc: Processing first storage report for DS-31f412c9-cb6c-41c5-b526-09a900da6299 from datanode DatanodeRegistration(127.0.0.1:46381, datanodeUuid=12e56a40-da9e-448c-8b67-5dbfa0a7e9db, infoPort=34315, infoSecurePort=0, ipcPort=40021, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534) 2024-11-11T07:00:31,734 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb7c39a21028ce9db with lease ID 0x8bd315100e7fffc: from storage DS-31f412c9-cb6c-41c5-b526-09a900da6299 node DatanodeRegistration(127.0.0.1:46381, datanodeUuid=12e56a40-da9e-448c-8b67-5dbfa0a7e9db, infoPort=34315, infoSecurePort=0, ipcPort=40021, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:00:31,735 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb7c39a21028ce9db with lease ID 0x8bd315100e7fffc: Processing first storage report for DS-67cb389a-8f09-4592-aada-bdbef3c99a67 from datanode DatanodeRegistration(127.0.0.1:46381, datanodeUuid=12e56a40-da9e-448c-8b67-5dbfa0a7e9db, infoPort=34315, infoSecurePort=0, ipcPort=40021, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534) 2024-11-11T07:00:31,735 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb7c39a21028ce9db with lease ID 0x8bd315100e7fffc: from storage DS-67cb389a-8f09-4592-aada-bdbef3c99a67 node DatanodeRegistration(127.0.0.1:46381, datanodeUuid=12e56a40-da9e-448c-8b67-5dbfa0a7e9db, infoPort=34315, infoSecurePort=0, ipcPort=40021, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:00:31,770 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@458e5bcf{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/java.io.tmpdir/jetty-localhost-39287-hadoop-hdfs-3_4_1-tests_jar-_-any-8340687644267219082/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:00:31,771 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6b75ba45{HTTP/1.1, (http/1.1)}{localhost:39287} 2024-11-11T07:00:31,771 INFO [Time-limited test {}] server.Server(415): Started @115350ms 2024-11-11T07:00:31,772 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T07:00:31,805 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:00:31,810 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T07:00:31,811 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T07:00:31,811 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T07:00:31,811 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T07:00:31,811 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@495a6aea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/hadoop.log.dir/,AVAILABLE} 2024-11-11T07:00:31,812 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15751333{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T07:00:31,876 WARN [Thread-862 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data8/current/BP-1526976113-172.17.0.2-1731308419534/current, will proceed with Du for space computation calculation, 2024-11-11T07:00:31,876 WARN [Thread-861 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data7/current/BP-1526976113-172.17.0.2-1731308419534/current, will proceed with Du for space computation calculation, 2024-11-11T07:00:31,898 WARN [Thread-841 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T07:00:31,900 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x760596663c21cf8e with lease ID 0x8bd315100e7fffd: Processing first storage report for DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc from datanode DatanodeRegistration(127.0.0.1:37637, datanodeUuid=533c26b8-d5ca-42ef-be06-bb74239cff60, infoPort=34089, infoSecurePort=0, ipcPort=37539, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534) 2024-11-11T07:00:31,900 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x760596663c21cf8e with lease ID 0x8bd315100e7fffd: from storage DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc node DatanodeRegistration(127.0.0.1:37637, datanodeUuid=533c26b8-d5ca-42ef-be06-bb74239cff60, infoPort=34089, infoSecurePort=0, ipcPort=37539, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:00:31,900 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x760596663c21cf8e with lease ID 0x8bd315100e7fffd: Processing first storage report for DS-456bde69-6b14-4f79-89ad-b50d6b4988df from datanode DatanodeRegistration(127.0.0.1:37637, datanodeUuid=533c26b8-d5ca-42ef-be06-bb74239cff60, infoPort=34089, infoSecurePort=0, ipcPort=37539, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534) 2024-11-11T07:00:31,900 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x760596663c21cf8e with lease ID 0x8bd315100e7fffd: from storage DS-456bde69-6b14-4f79-89ad-b50d6b4988df node DatanodeRegistration(127.0.0.1:37637, datanodeUuid=533c26b8-d5ca-42ef-be06-bb74239cff60, infoPort=34089, infoSecurePort=0, ipcPort=37539, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:00:31,939 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@47d01054{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/java.io.tmpdir/jetty-localhost-42291-hadoop-hdfs-3_4_1-tests_jar-_-any-2331689386901118372/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:00:31,939 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@17900346{HTTP/1.1, (http/1.1)}{localhost:42291} 2024-11-11T07:00:31,939 INFO [Time-limited test {}] server.Server(415): Started @115518ms 2024-11-11T07:00:31,941 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-11T07:00:32,039 WARN [Thread-887 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data9/current/BP-1526976113-172.17.0.2-1731308419534/current, will proceed with Du for space computation calculation, 2024-11-11T07:00:32,039 WARN [Thread-888 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data10/current/BP-1526976113-172.17.0.2-1731308419534/current, will proceed with Du for space computation calculation, 2024-11-11T07:00:32,055 WARN [Thread-876 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T07:00:32,057 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x46a12efec5bc1e32 with lease ID 0x8bd315100e7fffe: Processing first storage report for DS-7065d851-b326-4459-9ca9-fab9b603bb46 from datanode DatanodeRegistration(127.0.0.1:35845, datanodeUuid=be8a8252-5a22-4f13-859c-4ea81211e880, infoPort=46439, infoSecurePort=0, ipcPort=45519, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534) 2024-11-11T07:00:32,057 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x46a12efec5bc1e32 with lease ID 0x8bd315100e7fffe: from storage DS-7065d851-b326-4459-9ca9-fab9b603bb46 node DatanodeRegistration(127.0.0.1:35845, datanodeUuid=be8a8252-5a22-4f13-859c-4ea81211e880, infoPort=46439, infoSecurePort=0, ipcPort=45519, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:00:32,058 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x46a12efec5bc1e32 with lease ID 0x8bd315100e7fffe: Processing first storage report for DS-24672bd7-7065-415f-ad40-c9dab57f1fb7 from datanode DatanodeRegistration(127.0.0.1:35845, datanodeUuid=be8a8252-5a22-4f13-859c-4ea81211e880, infoPort=46439, infoSecurePort=0, ipcPort=45519, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534) 2024-11-11T07:00:32,058 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x46a12efec5bc1e32 with lease ID 0x8bd315100e7fffe: from storage DS-24672bd7-7065-415f-ad40-c9dab57f1fb7 node DatanodeRegistration(127.0.0.1:35845, datanodeUuid=be8a8252-5a22-4f13-859c-4ea81211e880, infoPort=46439, infoSecurePort=0, ipcPort=45519, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-11T07:00:32,065 WARN [ResponseProcessor for block BP-1526976113-172.17.0.2-1731308419534:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1526976113-172.17.0.2-1731308419534:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:32,065 WARN [ResponseProcessor for block BP-1526976113-172.17.0.2-1731308419534:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1526976113-172.17.0.2-1731308419534:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:32,065 WARN [ResponseProcessor for block BP-1526976113-172.17.0.2-1731308419534:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1526976113-172.17.0.2-1731308419534:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:32,065 WARN [ResponseProcessor for block BP-1526976113-172.17.0.2-1731308419534:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1526976113-172.17.0.2-1731308419534:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:32,065 WARN [DataStreamer for file /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/WALs/f334e8b0fbdf,40641,1731308420347/f334e8b0fbdf%2C40641%2C1731308420347.1731308420497 block BP-1526976113-172.17.0.2-1731308419534:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK], DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK]) is bad. 2024-11-11T07:00:32,065 WARN [DataStreamer for file /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta block BP-1526976113-172.17.0.2-1731308419534:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK], DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK]) is bad. 
2024-11-11T07:00:32,065 WARN [DataStreamer for file /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 block BP-1526976113-172.17.0.2-1731308419534:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK], DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK]) is bad. 2024-11-11T07:00:32,066 WARN [DataStreamer for file /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 block BP-1526976113-172.17.0.2-1731308419534:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK], DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK]) is bad. 2024-11-11T07:00:32,066 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_293869770_22 at /127.0.0.1:35488 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:41209:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35488 dst: /127.0.0.1:41209 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:32,066 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:35466 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41209:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35466 dst: /127.0.0.1:41209 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:32,066 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:59460 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39455:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59460 dst: /127.0.0.1:39455 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T07:00:32,066 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1542335413_22 at /127.0.0.1:42580 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39455:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42580 dst: /127.0.0.1:39455 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:32,066 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_293869770_22 at /127.0.0.1:59508 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:39455:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59508 dst: /127.0.0.1:39455 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:32,066 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:59468 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39455:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59468 dst: /127.0.0.1:39455 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:32,066 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1542335413_22 at /127.0.0.1:39786 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41209:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39786 dst: /127.0.0.1:41209 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:32,067 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@21d5e4af{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:00:32,068 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:35460 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41209:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35460 dst: /127.0.0.1:41209 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:32,068 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@145c0180{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:00:32,068 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:00:32,068 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7720beab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:00:32,069 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69bbaec1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/hadoop.log.dir/,STOPPED} 2024-11-11T07:00:32,070 WARN [BP-1526976113-172.17.0.2-1731308419534 heartbeating to localhost/127.0.0.1:41533 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T07:00:32,070 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T07:00:32,070 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T07:00:32,070 WARN [BP-1526976113-172.17.0.2-1731308419534 heartbeating to localhost/127.0.0.1:41533 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1526976113-172.17.0.2-1731308419534 (Datanode Uuid 85195dfd-4a83-4b20-9c58-2eee99c5bac8) service to localhost/127.0.0.1:41533 2024-11-11T07:00:32,071 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data3/current/BP-1526976113-172.17.0.2-1731308419534 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:00:32,071 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data4/current/BP-1526976113-172.17.0.2-1731308419534 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:00:32,072 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T07:00:32,072 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@3de396e6 {}] datanode.DataXceiver(331): 127.0.0.1:41209:DataXceiver error processing unknown operation src: /127.0.0.1:47626 dst: /127.0.0.1:41209 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:32,072 WARN [DataStreamer for file /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/WALs/f334e8b0fbdf,40641,1731308420347/f334e8b0fbdf%2C40641%2C1731308420347.1731308420497 block BP-1526976113-172.17.0.2-1731308419534:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:32,072 WARN [DataStreamer for file /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 block BP-1526976113-172.17.0.2-1731308419534:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:32,075 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@13e12de8 {}] datanode.DataXceiver(331): 127.0.0.1:41209:DataXceiver error processing unknown operation src: /127.0.0.1:47652 dst: /127.0.0.1:41209 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:32,076 WARN [DataStreamer for file /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 block BP-1526976113-172.17.0.2-1731308419534:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:32,076 WARN [ResponseProcessor for block BP-1526976113-172.17.0.2-1731308419534:blk_1073741834_1017 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1526976113-172.17.0.2-1731308419534:blk_1073741834_1017 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:32,076 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:47628 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41209:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47628 dst: /127.0.0.1:41209 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T07:00:32,077 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@ab5393f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:00:32,077 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@47f82e76{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:00:32,077 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:00:32,077 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30e7c448{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:00:32,077 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@257cf4bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/hadoop.log.dir/,STOPPED} 2024-11-11T07:00:32,079 WARN [BP-1526976113-172.17.0.2-1731308419534 heartbeating to localhost/127.0.0.1:41533 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T07:00:32,079 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T07:00:32,079 WARN [BP-1526976113-172.17.0.2-1731308419534 heartbeating to localhost/127.0.0.1:41533 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1526976113-172.17.0.2-1731308419534 (Datanode Uuid 500c043f-129e-4f6a-b5f3-a290998c9579) service to localhost/127.0.0.1:41533 2024-11-11T07:00:32,079 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T07:00:32,079 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data1/current/BP-1526976113-172.17.0.2-1731308419534 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:00:32,079 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data2/current/BP-1526976113-172.17.0.2-1731308419534 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:00:32,080 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T07:00:32,083 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3., hostname=f334e8b0fbdf,45321,1731308420401, seqNum=2] 2024-11-11T07:00:32,085 ERROR [FSHLog-0-hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842-prefix:f334e8b0fbdf,45321,1731308420401 {}] wal.AbstractFSWAL(1838): appendAndSync throws 
IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:32,085 WARN [FSHLog-0-hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842-prefix:f334e8b0fbdf,45321,1731308420401 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:32,085 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T07:00:32,085 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f334e8b0fbdf%2C45321%2C1731308420401:(num 1731308420824) roll requested 2024-11-11T07:00:32,086 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C45321%2C1731308420401.1731308432085 2024-11-11T07:00:32,091 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:32,091 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:32,091 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:32,091 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:32,091 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:32,091 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308432085 2024-11-11T07:00:32,092 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:32,092 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T07:00:32,092 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34089:34089),(127.0.0.1/127.0.0.1:46439:46439)] 2024-11-11T07:00:32,092 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 is not closed yet, will try archiving it next time 2024-11-11T07:00:32,093 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-11T07:00:32,093 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-11T07:00:32,093 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 2024-11-11T07:00:32,096 WARN [IPC Server handler 1 on default port 41533 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-11T07:00:32,100 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 after 5ms 2024-11-11T07:00:32,476 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:33,397 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T07:00:34,092 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:34,093 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308432085 2024-11-11T07:00:34,094 WARN [ResponseProcessor for block BP-1526976113-172.17.0.2-1731308419534:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1526976113-172.17.0.2-1731308419534:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:34,094 WARN [DataStreamer for file /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308432085 block BP-1526976113-172.17.0.2-1731308419534:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK], DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK]) is bad. 2024-11-11T07:00:34,095 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:35154 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:37637:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35154 dst: /127.0.0.1:37637 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:34,095 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:60300 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:35845:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60300 dst: /127.0.0.1:35845 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T07:00:34,096 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@458e5bcf{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:00:34,097 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6b75ba45{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:00:34,097 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:00:34,097 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3986ff43{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:00:34,097 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fbc343d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/hadoop.log.dir/,STOPPED} 2024-11-11T07:00:34,099 WARN [BP-1526976113-172.17.0.2-1731308419534 heartbeating to localhost/127.0.0.1:41533 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T07:00:34,099 WARN [BP-1526976113-172.17.0.2-1731308419534 heartbeating to localhost/127.0.0.1:41533 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1526976113-172.17.0.2-1731308419534 (Datanode Uuid 533c26b8-d5ca-42ef-be06-bb74239cff60) service to localhost/127.0.0.1:41533 2024-11-11T07:00:34,100 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data7/current/BP-1526976113-172.17.0.2-1731308419534 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:00:34,100 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data8/current/BP-1526976113-172.17.0.2-1731308419534 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:00:34,100 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T07:00:34,104 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T07:00:34,104 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T07:00:34,477 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:35,397 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:36,093 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:36,093 WARN [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK]] 2024-11-11T07:00:36,093 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f334e8b0fbdf%2C45321%2C1731308420401:(num 1731308432085) roll requested 2024-11-11T07:00:36,094 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C45321%2C1731308420401.1731308436093 2024-11-11T07:00:36,098 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:60322 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741839_1021] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data10]'}, localName='127.0.0.1:35845', datanodeUuid='be8a8252-5a22-4f13-859c-4ea81211e880', xmitsInProgress=0}:Exception transferring block BP-1526976113-172.17.0.2-1731308419534:blk_1073741839_1021 to mirror 127.0.0.1:37637 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:36,098 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37637 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:36,098 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:60322 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741839_1021] {}] datanode.BlockReceiver(316): Block 1073741839 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-11T07:00:36,098 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK], DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK]) is bad. 2024-11-11T07:00:36,098 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741839_1021 2024-11-11T07:00:36,098 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:60322 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:35845:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60322 dst: /127.0.0.1:35845 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:36,101 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK] 2024-11-11T07:00:36,101 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 after 4008ms 2024-11-11T07:00:36,104 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T07:00:36,104 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK], DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK]) is bad. 2024-11-11T07:00:36,104 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741840_1022 2024-11-11T07:00:36,105 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T07:00:36,105 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK] 2024-11-11T07:00:36,113 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:36,114 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:36,114 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:36,114 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:36,114 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:36,114 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308432085 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308436093 2024-11-11T07:00:36,115 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34315:34315),(127.0.0.1/127.0.0.1:46439:46439)] 2024-11-11T07:00:36,115 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 is not closed yet, will try archiving it next time 2024-11-11T07:00:36,115 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308432085 is not closed yet, will try archiving it next time 2024-11-11T07:00:36,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35845 is added to blk_1073741838_1020 (size=3600) 2024-11-11T07:00:36,477 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:36,517 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 is not closed yet, will try archiving it next time 2024-11-11T07:00:37,397 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:38,069 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5ff8ede9[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35845, datanodeUuid=be8a8252-5a22-4f13-859c-4ea81211e880, infoPort=46439, infoSecurePort=0, ipcPort=45519, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534):Failed to transfer BP-1526976113-172.17.0.2-1731308419534:blk_1073741838_1020 to 127.0.0.1:37637 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:38,108 WARN [ResponseProcessor for block BP-1526976113-172.17.0.2-1731308419534:blk_1073741841_1023 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1526976113-172.17.0.2-1731308419534:blk_1073741841_1023 java.io.IOException: Bad response ERROR for BP-1526976113-172.17.0.2-1731308419534:blk_1073741841_1023 from datanode DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T07:00:38,109 WARN [DataStreamer for file /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308436093 block BP-1526976113-172.17.0.2-1731308419534:blk_1073741841_1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-31f412c9-cb6c-41c5-b526-09a900da6299,DISK], DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK]) is bad. 2024-11-11T07:00:38,109 WARN [PacketResponder: BP-1526976113-172.17.0.2-1731308419534:blk_1073741841_1023, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35845] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:38,109 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:38962 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38962 dst: /127.0.0.1:46381 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:38,110 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:60330 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:35845:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60330 dst: /127.0.0.1:35845 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T07:00:38,110 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@47d01054{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:00:38,111 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@17900346{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:00:38,111 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:00:38,111 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15751333{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:00:38,111 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@495a6aea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/hadoop.log.dir/,STOPPED} 2024-11-11T07:00:38,112 WARN [BP-1526976113-172.17.0.2-1731308419534 heartbeating to localhost/127.0.0.1:41533 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T07:00:38,113 WARN [BP-1526976113-172.17.0.2-1731308419534 heartbeating to localhost/127.0.0.1:41533 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1526976113-172.17.0.2-1731308419534 (Datanode Uuid be8a8252-5a22-4f13-859c-4ea81211e880) service to localhost/127.0.0.1:41533 2024-11-11T07:00:38,113 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T07:00:38,113 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T07:00:38,113 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data9/current/BP-1526976113-172.17.0.2-1731308419534 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:00:38,113 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data10/current/BP-1526976113-172.17.0.2-1731308419534 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:00:38,113 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T07:00:38,115 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:38,115 WARN [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46381,DS-31f412c9-cb6c-41c5-b526-09a900da6299,DISK]] 2024-11-11T07:00:38,115 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f334e8b0fbdf%2C45321%2C1731308420401:(num 1731308436093) roll requested 2024-11-11T07:00:38,116 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C45321%2C1731308420401.1731308438115 2024-11-11T07:00:38,120 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41209 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:38,119 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:38982 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741842_1025] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='12e56a40-da9e-448c-8b67-5dbfa0a7e9db', xmitsInProgress=0}:Exception transferring block BP-1526976113-172.17.0.2-1731308419534:blk_1073741842_1025 to mirror 127.0.0.1:41209 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:38,120 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-31f412c9-cb6c-41c5-b526-09a900da6299,DISK], DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]) is bad. 2024-11-11T07:00:38,120 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741842_1025 2024-11-11T07:00:38,120 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:38982 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741842_1025] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-11T07:00:38,120 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:38982 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741842_1025] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38982 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:38,120 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK] 2024-11-11T07:00:38,121 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:38,122 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK], DatanodeInfoWithStorage[127.0.0.1:46381,DS-31f412c9-cb6c-41c5-b526-09a900da6299,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK]) is bad. 2024-11-11T07:00:38,122 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741843_1026 2024-11-11T07:00:38,122 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK] 2024-11-11T07:00:38,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45321 {}] regionserver.HRegion(8855): Flush requested on b2b5aa4a37c19b1f701e750c2db983e3 2024-11-11T07:00:38,123 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b2b5aa4a37c19b1f701e750c2db983e3 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T07:00:38,124 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37637 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:38,124 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:38984 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741844_1027] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='12e56a40-da9e-448c-8b67-5dbfa0a7e9db', xmitsInProgress=0}:Exception transferring block BP-1526976113-172.17.0.2-1731308419534:blk_1073741844_1027 to mirror 127.0.0.1:37637 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:38,124 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-31f412c9-cb6c-41c5-b526-09a900da6299,DISK], DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK]) is bad. 2024-11-11T07:00:38,125 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741844_1027 2024-11-11T07:00:38,125 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:38984 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741844_1027] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-11T07:00:38,125 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:38984 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741844_1027] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38984 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:38,125 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK] 2024-11-11T07:00:38,129 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39455 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:38,129 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:39000 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741845_1028] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='12e56a40-da9e-448c-8b67-5dbfa0a7e9db', xmitsInProgress=0}:Exception transferring block BP-1526976113-172.17.0.2-1731308419534:blk_1073741845_1028 to mirror 127.0.0.1:39455 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:38,129 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-31f412c9-cb6c-41c5-b526-09a900da6299,DISK], DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK]) is bad. 2024-11-11T07:00:38,129 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741845_1028 2024-11-11T07:00:38,129 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:39000 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741845_1028] {}] datanode.BlockReceiver(316): Block 1073741845 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-11T07:00:38,129 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:39000 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741845_1028] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39000 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:38,130 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK] 2024-11-11T07:00:38,131 WARN [IPC Server handler 0 on default port 41533 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T07:00:38,131 WARN [IPC Server handler 0 on default port 41533 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T07:00:38,131 WARN [IPC Server handler 0 on default port 41533 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T07:00:38,142 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:38,142 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:38,142 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:38,142 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:38,142 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:38,142 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL 
/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308436093 with entries=12, filesize=12.96 KB; new WAL /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308438115 2024-11-11T07:00:38,142 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/.tmp/info/e20cbf4bedba4069a3746dd249c38d3c is 1080, key is row0002/info:/1731308434101/Put/seqid=0 2024-11-11T07:00:38,144 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34315:34315)] 2024-11-11T07:00:38,144 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 is not closed yet, will try archiving it next time 2024-11-11T07:00:38,144 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308436093 is not closed yet, will try archiving it next time 2024-11-11T07:00:38,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741841_1024 (size=13274) 2024-11-11T07:00:38,144 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:38,144 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK], DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK]) is bad. 
2024-11-11T07:00:38,144 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741847_1030 2024-11-11T07:00:38,145 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK] 2024-11-11T07:00:38,146 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:38,146 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK], DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK]) is bad. 2024-11-11T07:00:38,146 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741848_1031 2024-11-11T07:00:38,147 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK] 2024-11-11T07:00:38,148 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T07:00:38,148 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK], DatanodeInfoWithStorage[127.0.0.1:46381,DS-31f412c9-cb6c-41c5-b526-09a900da6299,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK]) is bad. 2024-11-11T07:00:38,148 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741849_1032 2024-11-11T07:00:38,149 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK] 2024-11-11T07:00:38,151 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41209 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:38,151 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:39018 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='12e56a40-da9e-448c-8b67-5dbfa0a7e9db', xmitsInProgress=0}:Exception transferring block BP-1526976113-172.17.0.2-1731308419534:blk_1073741850_1033 to mirror 127.0.0.1:41209 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T07:00:38,151 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-31f412c9-cb6c-41c5-b526-09a900da6299,DISK], DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]) is bad. 2024-11-11T07:00:38,151 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741850_1033 2024-11-11T07:00:38,151 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:39018 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T07:00:38,151 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:39018 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39018 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T07:00:38,152 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK] 2024-11-11T07:00:38,152 WARN [IPC Server handler 1 on default port 41533 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T07:00:38,152 WARN [IPC Server handler 1 on default port 41533 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T07:00:38,153 WARN [IPC Server handler 1 on default port 41533 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T07:00:38,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741851_1034 (size=10347) 2024-11-11T07:00:38,477 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
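The "Failed to place enough replicas" warnings above name the two loggers to turn up for more detail. A minimal, illustrative way to do that programmatically with the Log4j2 API, assuming log4j-core 2.x is on the classpath as it is for this run; two logger entries in log4j2.properties would have the same effect:

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    public final class EnableBlockPlacementDebug {
        public static void main(String[] args) {
            // Turn on the DEBUG detail the BlockPlacementPolicyDefault warning asks for.
            Configurator.setLevel(
                "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
            Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
        }
    }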
2024-11-11T07:00:38,545 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 is not closed yet, will try archiving it next time 2024-11-11T07:00:38,556 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/.tmp/info/e20cbf4bedba4069a3746dd249c38d3c 2024-11-11T07:00:38,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/.tmp/info/e20cbf4bedba4069a3746dd249c38d3c as hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/e20cbf4bedba4069a3746dd249c38d3c 2024-11-11T07:00:38,568 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/e20cbf4bedba4069a3746dd249c38d3c, entries=5, sequenceid=11, filesize=10.1 K 2024-11-11T07:00:38,569 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for b2b5aa4a37c19b1f701e750c2db983e3 in 446ms, sequenceid=11, compaction requested=false 2024-11-11T07:00:38,569 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b2b5aa4a37c19b1f701e750c2db983e3: 2024-11-11T07:00:38,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45321 {}] regionserver.HRegion(8855): Flush requested on b2b5aa4a37c19b1f701e750c2db983e3 2024-11-11T07:00:38,744 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b2b5aa4a37c19b1f701e750c2db983e3 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-11T07:00:38,748 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/.tmp/info/09182c896d974f8aa50a53f66286a2ba is 1080, key is row0007/info:/1731308438125/Put/seqid=0 2024-11-11T07:00:38,750 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:38,750 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK], DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK]) is bad. 2024-11-11T07:00:38,750 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741852_1035 2024-11-11T07:00:38,751 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK] 2024-11-11T07:00:38,752 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:38,752 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK], DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]) is bad. 2024-11-11T07:00:38,752 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741853_1036 2024-11-11T07:00:38,753 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK] 2024-11-11T07:00:38,754 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:38,754 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK], DatanodeInfoWithStorage[127.0.0.1:46381,DS-31f412c9-cb6c-41c5-b526-09a900da6299,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK]) is bad. 2024-11-11T07:00:38,754 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741854_1037 2024-11-11T07:00:38,755 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK] 2024-11-11T07:00:38,756 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:38,756 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK], DatanodeInfoWithStorage[127.0.0.1:46381,DS-31f412c9-cb6c-41c5-b526-09a900da6299,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK]) is bad. 
2024-11-11T07:00:38,756 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741855_1038 2024-11-11T07:00:38,756 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK] 2024-11-11T07:00:38,757 WARN [IPC Server handler 2 on default port 41533 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T07:00:38,757 WARN [IPC Server handler 2 on default port 41533 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T07:00:38,757 WARN [IPC Server handler 2 on default port 41533 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T07:00:38,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741856_1039 (size=12506) 2024-11-11T07:00:38,760 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/.tmp/info/09182c896d974f8aa50a53f66286a2ba 2024-11-11T07:00:38,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/.tmp/info/09182c896d974f8aa50a53f66286a2ba as hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/09182c896d974f8aa50a53f66286a2ba 2024-11-11T07:00:38,771 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/09182c896d974f8aa50a53f66286a2ba, entries=7, sequenceid=24, filesize=12.2 K 2024-11-11T07:00:38,772 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=0 B/0 for b2b5aa4a37c19b1f701e750c2db983e3 in 28ms, sequenceid=24, compaction requested=false 2024-11-11T07:00:38,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
b2b5aa4a37c19b1f701e750c2db983e3: 2024-11-11T07:00:38,773 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-11T07:00:38,773 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T07:00:38,773 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/09182c896d974f8aa50a53f66286a2ba because midkey is the same as first or last row 2024-11-11T07:00:39,398 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:40,144 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:40,145 WARN [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46381,DS-31f412c9-cb6c-41c5-b526-09a900da6299,DISK]] 2024-11-11T07:00:40,145 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f334e8b0fbdf%2C45321%2C1731308420401:(num 1731308438115) roll requested 2024-11-11T07:00:40,145 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C45321%2C1731308420401.1731308440145 2024-11-11T07:00:40,149 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37637 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:40,149 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:39056 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741857_1040] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='12e56a40-da9e-448c-8b67-5dbfa0a7e9db', xmitsInProgress=0}:Exception transferring block BP-1526976113-172.17.0.2-1731308419534:blk_1073741857_1040 to mirror 127.0.0.1:37637 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:40,149 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-31f412c9-cb6c-41c5-b526-09a900da6299,DISK], DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK]) is bad. 2024-11-11T07:00:40,149 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741857_1040 2024-11-11T07:00:40,149 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:39056 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741857_1040] {}] datanode.BlockReceiver(316): Block 1073741857 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-11T07:00:40,150 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:39056 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741857_1040] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39056 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:40,150 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK] 2024-11-11T07:00:40,152 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39455 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:40,152 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:39070 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741858_1041] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='12e56a40-da9e-448c-8b67-5dbfa0a7e9db', xmitsInProgress=0}:Exception transferring block BP-1526976113-172.17.0.2-1731308419534:blk_1073741858_1041 to mirror 127.0.0.1:39455 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:40,152 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-31f412c9-cb6c-41c5-b526-09a900da6299,DISK], DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK]) is bad. 2024-11-11T07:00:40,152 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:39070 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741858_1041] {}] datanode.BlockReceiver(316): Block 1073741858 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-11T07:00:40,152 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741858_1041 2024-11-11T07:00:40,152 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:39070 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741858_1041] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39070 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:40,153 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK] 2024-11-11T07:00:40,154 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:40,154 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK], DatanodeInfoWithStorage[127.0.0.1:46381,DS-31f412c9-cb6c-41c5-b526-09a900da6299,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]) is bad. 2024-11-11T07:00:40,154 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741859_1042 2024-11-11T07:00:40,154 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK] 2024-11-11T07:00:40,156 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:40,156 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK], DatanodeInfoWithStorage[127.0.0.1:46381,DS-31f412c9-cb6c-41c5-b526-09a900da6299,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK]) is bad. 
2024-11-11T07:00:40,156 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741860_1043 2024-11-11T07:00:40,156 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK] 2024-11-11T07:00:40,157 WARN [IPC Server handler 3 on default port 41533 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T07:00:40,157 WARN [IPC Server handler 3 on default port 41533 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T07:00:40,157 WARN [IPC Server handler 3 on default port 41533 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T07:00:40,159 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:40,159 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:40,159 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:40,160 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:40,160 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:40,160 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308438115 with entries=12, filesize=11.36 KB; new WAL /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308440145 2024-11-11T07:00:40,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741846_1029 (size=11642) 2024-11-11T07:00:40,165 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34315:34315)] 2024-11-11T07:00:40,165 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 is not closed yet, will try archiving it next time 2024-11-11T07:00:40,165 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308438115 is not closed yet, 
will try archiving it next time 2024-11-11T07:00:40,165 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308432085 to hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/oldWALs/f334e8b0fbdf%2C45321%2C1731308420401.1731308432085 2024-11-11T07:00:40,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45321 {}] regionserver.HRegion(8855): Flush requested on b2b5aa4a37c19b1f701e750c2db983e3 2024-11-11T07:00:40,166 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b2b5aa4a37c19b1f701e750c2db983e3 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-11T07:00:40,167 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308436093 to hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/oldWALs/f334e8b0fbdf%2C45321%2C1731308420401.1731308436093 2024-11-11T07:00:40,170 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/.tmp/info/b9b730e0f781472ea1e8af89af8e9672 is 1079, key is tmprow/info:/1731308440160/Put/seqid=0 2024-11-11T07:00:40,172 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:40,172 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK], DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]) is bad. 
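The "HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL" entry above is FSHLog comparing the surviving pipeline against the expected replication and asking the log roller for a fresh writer. A hedged sketch of the configuration knobs involved; the key names and values shown are assumptions to verify against the HBase version under test, not this run's actual settings:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class WalRollTuning {
        static Configuration walRollConf() {
            Configuration conf = HBaseConfiguration.create();
            // Pipelines smaller than this are treated as low replication (key name assumed).
            conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
            // Stop requesting further rolls after this many consecutive
            // low-replication rolls (key name assumed).
            conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
            return conf;
        }
    }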
2024-11-11T07:00:40,172 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741862_1045 2024-11-11T07:00:40,172 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK] 2024-11-11T07:00:40,174 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:40,174 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK], DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK]) is bad. 2024-11-11T07:00:40,174 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741863_1046 2024-11-11T07:00:40,174 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK] 2024-11-11T07:00:40,175 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T07:00:40,175 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK], DatanodeInfoWithStorage[127.0.0.1:46381,DS-31f412c9-cb6c-41c5-b526-09a900da6299,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK]) is bad. 2024-11-11T07:00:40,175 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741864_1047 2024-11-11T07:00:40,176 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK] 2024-11-11T07:00:40,177 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:40,177 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK], DatanodeInfoWithStorage[127.0.0.1:46381,DS-31f412c9-cb6c-41c5-b526-09a900da6299,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK]) is bad. 
2024-11-11T07:00:40,177 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741865_1048 2024-11-11T07:00:40,177 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK] 2024-11-11T07:00:40,178 WARN [IPC Server handler 1 on default port 41533 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T07:00:40,178 WARN [IPC Server handler 1 on default port 41533 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T07:00:40,178 WARN [IPC Server handler 1 on default port 41533 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T07:00:40,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741866_1049 (size=6027) 2024-11-11T07:00:40,478 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
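The recurring "All datanodes [...] are bad. Aborting..." traces are DataStreamer giving up append/recovery once every node remaining in a write pipeline has been excluded. Whether the client instead asks the NameNode for replacement datanodes is governed by the HDFS client's replace-datanode-on-failure settings; a sketch with illustrative values, not the values this test uses:

    import org.apache.hadoop.conf.Configuration;

    public final class PipelineRecoveryConf {
        static Configuration clientConf() {
            Configuration conf = new Configuration();
            // Ask for a replacement datanode whenever one in the write pipeline fails.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "ALWAYS");
            // If no replacement can be found, keep writing to the surviving nodes
            // rather than aborting the stream.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
            return conf;
        }
    }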
2024-11-11T07:00:40,562 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 is not closed yet, will try archiving it next time 2024-11-11T07:00:40,581 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/.tmp/info/b9b730e0f781472ea1e8af89af8e9672 2024-11-11T07:00:40,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/.tmp/info/b9b730e0f781472ea1e8af89af8e9672 as hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/b9b730e0f781472ea1e8af89af8e9672 2024-11-11T07:00:40,593 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/b9b730e0f781472ea1e8af89af8e9672, entries=1, sequenceid=34, filesize=5.9 K 2024-11-11T07:00:40,595 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for b2b5aa4a37c19b1f701e750c2db983e3 in 428ms, sequenceid=34, compaction requested=true 2024-11-11T07:00:40,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b2b5aa4a37c19b1f701e750c2db983e3: 2024-11-11T07:00:40,595 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-11T07:00:40,595 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T07:00:40,595 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/09182c896d974f8aa50a53f66286a2ba because midkey is the same as first or last row 2024-11-11T07:00:40,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b2b5aa4a37c19b1f701e750c2db983e3:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T07:00:40,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T07:00:40,595 DEBUG [RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T07:00:40,597 DEBUG [RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T07:00:40,597 DEBUG 
[RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] regionserver.HStore(1541): b2b5aa4a37c19b1f701e750c2db983e3/info is initiating minor compaction (all files) 2024-11-11T07:00:40,597 INFO [RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b2b5aa4a37c19b1f701e750c2db983e3/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3. 2024-11-11T07:00:40,597 INFO [RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/e20cbf4bedba4069a3746dd249c38d3c, hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/09182c896d974f8aa50a53f66286a2ba, hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/b9b730e0f781472ea1e8af89af8e9672] into tmpdir=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/.tmp, totalSize=28.2 K 2024-11-11T07:00:40,597 DEBUG [RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] compactions.Compactor(225): Compacting e20cbf4bedba4069a3746dd249c38d3c, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731308434101 2024-11-11T07:00:40,598 DEBUG [RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] compactions.Compactor(225): Compacting 09182c896d974f8aa50a53f66286a2ba, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731308438125 2024-11-11T07:00:40,598 DEBUG [RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] compactions.Compactor(225): Compacting b9b730e0f781472ea1e8af89af8e9672, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731308440160 2024-11-11T07:00:40,612 INFO [RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b2b5aa4a37c19b1f701e750c2db983e3#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T07:00:40,613 DEBUG [RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/.tmp/info/3b80c36e62fb4cd9af926fa86f4f8366 is 1080, key is row0002/info:/1731308434101/Put/seqid=0 2024-11-11T07:00:40,614 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:40,614 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK], DatanodeInfoWithStorage[127.0.0.1:46381,DS-31f412c9-cb6c-41c5-b526-09a900da6299,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK]) is bad. 2024-11-11T07:00:40,614 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741867_1050 2024-11-11T07:00:40,615 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK] 2024-11-11T07:00:40,616 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:40,616 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK], DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK]) is bad. 2024-11-11T07:00:40,616 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741868_1051 2024-11-11T07:00:40,617 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK] 2024-11-11T07:00:40,617 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:40,618 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK], DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK]) is bad. 2024-11-11T07:00:40,618 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741869_1052 2024-11-11T07:00:40,618 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK] 2024-11-11T07:00:40,620 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41209 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:40,620 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:39106 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741870_1053] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='12e56a40-da9e-448c-8b67-5dbfa0a7e9db', xmitsInProgress=0}:Exception transferring block BP-1526976113-172.17.0.2-1731308419534:blk_1073741870_1053 to mirror 127.0.0.1:41209 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:40,620 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-31f412c9-cb6c-41c5-b526-09a900da6299,DISK], DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]) is bad. 2024-11-11T07:00:40,621 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741870_1053 2024-11-11T07:00:40,621 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:39106 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741870_1053] {}] datanode.BlockReceiver(316): Block 1073741870 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T07:00:40,621 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:39106 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741870_1053] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39106 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T07:00:40,621 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK] 2024-11-11T07:00:40,622 WARN [IPC Server handler 2 on default port 41533 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T07:00:40,622 WARN [IPC Server handler 2 on default port 41533 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T07:00:40,622 WARN [IPC Server handler 2 on default port 41533 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T07:00:40,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741871_1054 (size=17994) 2024-11-11T07:00:41,032 DEBUG [RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/.tmp/info/3b80c36e62fb4cd9af926fa86f4f8366 as hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/3b80c36e62fb4cd9af926fa86f4f8366 2024-11-11T07:00:41,039 INFO [RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b2b5aa4a37c19b1f701e750c2db983e3/info of b2b5aa4a37c19b1f701e750c2db983e3 into 3b80c36e62fb4cd9af926fa86f4f8366(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T07:00:41,039 DEBUG [RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b2b5aa4a37c19b1f701e750c2db983e3: 2024-11-11T07:00:41,039 INFO [RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3., storeName=b2b5aa4a37c19b1f701e750c2db983e3/info, priority=13, startTime=1731308440595; duration=0sec 2024-11-11T07:00:41,040 DEBUG [RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-11T07:00:41,040 DEBUG [RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T07:00:41,040 DEBUG [RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/3b80c36e62fb4cd9af926fa86f4f8366 because midkey is the same as first or last row 2024-11-11T07:00:41,040 DEBUG [RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-11T07:00:41,040 DEBUG [RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T07:00:41,040 DEBUG [RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/3b80c36e62fb4cd9af926fa86f4f8366 because midkey is the same as first or last row 2024-11-11T07:00:41,040 DEBUG [RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-11T07:00:41,040 DEBUG [RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T07:00:41,040 DEBUG [RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/3b80c36e62fb4cd9af926fa86f4f8366 because midkey is the same as first or last row 2024-11-11T07:00:41,040 DEBUG [RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T07:00:41,040 DEBUG [RS:0;f334e8b0fbdf:45321-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2b5aa4a37c19b1f701e750c2db983e3:info 2024-11-11T07:00:41,398 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:41,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45321 {}] regionserver.HRegion(8855): Flush requested on b2b5aa4a37c19b1f701e750c2db983e3 2024-11-11T07:00:41,583 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b2b5aa4a37c19b1f701e750c2db983e3 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-11T07:00:41,588 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/.tmp/info/0b8083ae3e4c4fe2b2e5980e0d92c6f7 is 1079, key is tmprow/info:/1731308441582/Put/seqid=0 2024-11-11T07:00:41,589 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:41,589 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK], DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK]) is bad. 2024-11-11T07:00:41,589 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741872_1055 2024-11-11T07:00:41,590 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39455,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK] 2024-11-11T07:00:41,591 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:41,591 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK], DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK]) is bad. 2024-11-11T07:00:41,591 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741873_1056 2024-11-11T07:00:41,592 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK] 2024-11-11T07:00:41,594 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35845 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:41,594 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:58352 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741874_1057] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='12e56a40-da9e-448c-8b67-5dbfa0a7e9db', xmitsInProgress=0}:Exception transferring block BP-1526976113-172.17.0.2-1731308419534:blk_1073741874_1057 to mirror 127.0.0.1:35845 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:41,594 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-31f412c9-cb6c-41c5-b526-09a900da6299,DISK], DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK]) is bad. 2024-11-11T07:00:41,594 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741874_1057 2024-11-11T07:00:41,594 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:58352 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741874_1057] {}] datanode.BlockReceiver(316): Block 1073741874 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T07:00:41,594 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:58352 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741874_1057] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58352 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:41,594 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK] 2024-11-11T07:00:41,596 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41209 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:41,596 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:58360 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741875_1058] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='12e56a40-da9e-448c-8b67-5dbfa0a7e9db', xmitsInProgress=0}:Exception transferring block BP-1526976113-172.17.0.2-1731308419534:blk_1073741875_1058 to mirror 127.0.0.1:41209 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:41,597 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-31f412c9-cb6c-41c5-b526-09a900da6299,DISK], DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]) is bad. 2024-11-11T07:00:41,597 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741875_1058 2024-11-11T07:00:41,597 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:58360 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741875_1058] {}] datanode.BlockReceiver(316): Block 1073741875 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T07:00:41,597 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:58360 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741875_1058] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58360 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:41,597 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK] 2024-11-11T07:00:41,598 WARN [IPC Server handler 4 on default port 41533 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T07:00:41,598 WARN [IPC Server handler 4 on default port 41533 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T07:00:41,598 WARN [IPC Server handler 4 on default port 41533 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T07:00:41,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741876_1059 (size=6027) 2024-11-11T07:00:41,735 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@275a21e1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46381, datanodeUuid=12e56a40-da9e-448c-8b67-5dbfa0a7e9db, infoPort=34315, infoSecurePort=0, ipcPort=40021, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534):Failed to transfer BP-1526976113-172.17.0.2-1731308419534:blk_1073741851_1034 to 127.0.0.1:39455 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:41,735 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@299dcaa2[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46381, datanodeUuid=12e56a40-da9e-448c-8b67-5dbfa0a7e9db, infoPort=34315, infoSecurePort=0, ipcPort=40021, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534):Failed to transfer BP-1526976113-172.17.0.2-1731308419534:blk_1073741841_1024 to 127.0.0.1:41209 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T07:00:42,001 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/.tmp/info/0b8083ae3e4c4fe2b2e5980e0d92c6f7 2024-11-11T07:00:42,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/.tmp/info/0b8083ae3e4c4fe2b2e5980e0d92c6f7 as hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/0b8083ae3e4c4fe2b2e5980e0d92c6f7 2024-11-11T07:00:42,013 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/0b8083ae3e4c4fe2b2e5980e0d92c6f7, entries=1, sequenceid=45, filesize=5.9 K 2024-11-11T07:00:42,014 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for b2b5aa4a37c19b1f701e750c2db983e3 in 431ms, sequenceid=45, compaction requested=false 2024-11-11T07:00:42,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b2b5aa4a37c19b1f701e750c2db983e3: 2024-11-11T07:00:42,014 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-11T07:00:42,014 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T07:00:42,014 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/3b80c36e62fb4cd9af926fa86f4f8366 because midkey is the same as first or last row 2024-11-11T07:00:42,165 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:42,165 WARN [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-11T07:00:42,197 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:00:42,200 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T07:00:42,200 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T07:00:42,201 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T07:00:42,201 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T07:00:42,201 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2dee3ed2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/hadoop.log.dir/,AVAILABLE} 2024-11-11T07:00:42,202 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c92a539{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T07:00:42,317 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2aaad1eb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/java.io.tmpdir/jetty-localhost-44343-hadoop-hdfs-3_4_1-tests_jar-_-any-14974313773678699314/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:00:42,317 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@41b6b906{HTTP/1.1, (http/1.1)}{localhost:44343} 2024-11-11T07:00:42,317 INFO [Time-limited test {}] server.Server(415): Started @125896ms 2024-11-11T07:00:42,319 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T07:00:42,422 WARN [Thread-982 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T07:00:42,426 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6cdeca9c7f6a0930 with lease ID 0x8bd315100e7ffff: from storage DS-838013a3-793d-45af-bf61-73d95dc42912 node DatanodeRegistration(127.0.0.1:34539, datanodeUuid=85195dfd-4a83-4b20-9c58-2eee99c5bac8, infoPort=42483, infoSecurePort=0, ipcPort=38931, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-11T07:00:42,426 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6cdeca9c7f6a0930 with lease ID 0x8bd315100e7ffff: from storage DS-d28ad5a6-0a53-4114-ac14-f452714625d5 node DatanodeRegistration(127.0.0.1:34539, datanodeUuid=85195dfd-4a83-4b20-9c58-2eee99c5bac8, infoPort=42483, infoSecurePort=0, ipcPort=38931, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:00:42,478 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:42,735 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@275a21e1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46381, datanodeUuid=12e56a40-da9e-448c-8b67-5dbfa0a7e9db, infoPort=34315, infoSecurePort=0, ipcPort=40021, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534):Failed to transfer BP-1526976113-172.17.0.2-1731308419534:blk_1073741846_1029 to 127.0.0.1:37637 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T07:00:42,735 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@299dcaa2[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46381, datanodeUuid=12e56a40-da9e-448c-8b67-5dbfa0a7e9db, infoPort=34315, infoSecurePort=0, ipcPort=40021, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534):Failed to transfer BP-1526976113-172.17.0.2-1731308419534:blk_1073741856_1039 to 127.0.0.1:37637 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:43,398 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:44,166 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:44,479 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:44,735 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@299dcaa2[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46381, datanodeUuid=12e56a40-da9e-448c-8b67-5dbfa0a7e9db, infoPort=34315, infoSecurePort=0, ipcPort=40021, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534):Failed to transfer BP-1526976113-172.17.0.2-1731308419534:blk_1073741866_1049 to 127.0.0.1:35845 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:44,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34539 is added to blk_1073741871_1054 (size=17994) 2024-11-11T07:00:45,399 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:45,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34539 is added to blk_1073741876_1059 (size=6027) 2024-11-11T07:00:46,166 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:46,479 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:47,399 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:48,166 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:48,479 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T07:00:49,400 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:50,167 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:50,327 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-11T07:00:50,480 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:50,571 ERROR [FSHLog-0-hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData-prefix:f334e8b0fbdf,40641,1731308420347 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:50,571 WARN [FSHLog-0-hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData-prefix:f334e8b0fbdf,40641,1731308420347 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:50,571 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog f334e8b0fbdf%2C40641%2C1731308420347:(num 1731308420497) roll requested 2024-11-11T07:00:50,572 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C40641%2C1731308420347.1731308450571 2024-11-11T07:00:50,574 WARN [Thread-1002 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:50,575 WARN [Thread-1002 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK], DatanodeInfoWithStorage[127.0.0.1:34539,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK]) is bad. 
2024-11-11T07:00:50,575 WARN [Thread-1002 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741877_1060 2024-11-11T07:00:50,575 WARN [Thread-1002 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK] 2024-11-11T07:00:50,577 WARN [Thread-1002 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37637 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:50,577 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1542335413_22 at /127.0.0.1:59998 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741878_1061] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data4]'}, localName='127.0.0.1:34539', datanodeUuid='85195dfd-4a83-4b20-9c58-2eee99c5bac8', xmitsInProgress=0}:Exception transferring block BP-1526976113-172.17.0.2-1731308419534:blk_1073741878_1061 to mirror 127.0.0.1:37637 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:50,578 WARN [Thread-1002 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34539,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK], DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK]) is bad. 
2024-11-11T07:00:50,578 WARN [Thread-1002 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741878_1061 2024-11-11T07:00:50,578 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1542335413_22 at /127.0.0.1:59998 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741878_1061] {}] datanode.BlockReceiver(316): Block 1073741878 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-11T07:00:50,578 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1542335413_22 at /127.0.0.1:59998 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741878_1061] {}] datanode.DataXceiver(331): 127.0.0.1:34539:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59998 dst: /127.0.0.1:34539 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:50,578 WARN [Thread-1002 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37637,DS-8d6b80b5-03d1-4c86-86d4-7f51cfd849bc,DISK] 2024-11-11T07:00:50,582 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:50,582 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:50,582 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:50,582 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:50,582 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:50,582 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/WALs/f334e8b0fbdf,40641,1731308420347/f334e8b0fbdf%2C40641%2C1731308420347.1731308420497 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/WALs/f334e8b0fbdf,40641,1731308420347/f334e8b0fbdf%2C40641%2C1731308420347.1731308450571 2024-11-11T07:00:50,583 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:50,583 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:50,583 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/WALs/f334e8b0fbdf,40641,1731308420347/f334e8b0fbdf%2C40641%2C1731308420347.1731308420497 2024-11-11T07:00:50,583 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42483:42483),(127.0.0.1/127.0.0.1:34315:34315)] 2024-11-11T07:00:50,583 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/WALs/f334e8b0fbdf,40641,1731308420347/f334e8b0fbdf%2C40641%2C1731308420347.1731308420497 is not closed yet, will try archiving it next time 2024-11-11T07:00:50,583 WARN [IPC Server handler 1 on default port 41533 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/WALs/f334e8b0fbdf,40641,1731308420347/f334e8b0fbdf%2C40641%2C1731308420347.1731308420497 has not been closed. Lease recovery is in progress. RecoveryId = 1063 for block blk_1073741830_1006 2024-11-11T07:00:50,584 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/WALs/f334e8b0fbdf,40641,1731308420347/f334e8b0fbdf%2C40641%2C1731308420347.1731308420497 after 0ms 2024-11-11T07:00:51,400 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T07:00:52,167 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:53,400 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:54,167 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:54,585 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/WALs/f334e8b0fbdf,40641,1731308420347/f334e8b0fbdf%2C40641%2C1731308420347.1731308420497 after 4002ms 2024-11-11T07:00:55,401 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:56,168 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:56,428 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@21e8273d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34539, datanodeUuid=85195dfd-4a83-4b20-9c58-2eee99c5bac8, infoPort=42483, infoSecurePort=0, ipcPort=38931, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534):Failed to transfer BP-1526976113-172.17.0.2-1731308419534:blk_1073741836_1012 to 127.0.0.1:35845 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:56,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741832_1008 (size=32) 2024-11-11T07:00:57,401 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T07:00:57,428 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@21e8273d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34539, datanodeUuid=85195dfd-4a83-4b20-9c58-2eee99c5bac8, infoPort=42483, infoSecurePort=0, ipcPort=38931, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534):Failed to transfer BP-1526976113-172.17.0.2-1731308419534:blk_1073741826_1002 to 127.0.0.1:35845 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:57,428 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@773ba02a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34539, datanodeUuid=85195dfd-4a83-4b20-9c58-2eee99c5bac8, infoPort=42483, infoSecurePort=0, ipcPort=38931, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534):Failed to transfer BP-1526976113-172.17.0.2-1731308419534:blk_1073741828_1004 to 127.0.0.1:35845 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:58,152 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C45321%2C1731308420401.1731308458152 2024-11-11T07:00:58,156 WARN [Thread-1016 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1064 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35845 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:58,156 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1542335413_22 at /127.0.0.1:45536 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741880_1064] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='12e56a40-da9e-448c-8b67-5dbfa0a7e9db', xmitsInProgress=0}:Exception transferring block BP-1526976113-172.17.0.2-1731308419534:blk_1073741880_1064 to mirror 127.0.0.1:35845 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:58,156 WARN [Thread-1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741880_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-31f412c9-cb6c-41c5-b526-09a900da6299,DISK], DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK]) is bad. 2024-11-11T07:00:58,156 WARN [Thread-1016 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741880_1064 2024-11-11T07:00:58,156 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1542335413_22 at /127.0.0.1:45536 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741880_1064] {}] datanode.BlockReceiver(316): Block 1073741880 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-11T07:00:58,156 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1542335413_22 at /127.0.0.1:45536 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741880_1064] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45536 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:58,157 WARN [Thread-1016 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK] 2024-11-11T07:00:58,161 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:58,161 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:58,161 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:58,161 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:58,161 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:58,162 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308440145 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308458152 2024-11-11T07:00:58,162 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42483:42483),(127.0.0.1/127.0.0.1:34315:34315)] 2024-11-11T07:00:58,162 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 is not closed yet, will try archiving it next time 2024-11-11T07:00:58,162 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308440145 is not closed yet, will try archiving it next time 2024-11-11T07:00:58,163 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308438115 to hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/oldWALs/f334e8b0fbdf%2C45321%2C1731308420401.1731308438115 2024-11-11T07:00:58,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741861_1044 (size=13591) 2024-11-11T07:00:58,168 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:58,168 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-11T07:00:58,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45321 {}] regionserver.HRegion(8855): Flush requested on b2b5aa4a37c19b1f701e750c2db983e3 2024-11-11T07:00:58,173 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b2b5aa4a37c19b1f701e750c2db983e3 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-11T07:00:58,177 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/.tmp/info/b5dcbf30717a4ba2875c5eef99d5638a is 1080, key is row0013/info:/1731308458164/Put/seqid=0 2024-11-11T07:00:58,180 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:60526 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741882_1066] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data4]'}, localName='127.0.0.1:34539', datanodeUuid='85195dfd-4a83-4b20-9c58-2eee99c5bac8', xmitsInProgress=0}:Exception transferring block BP-1526976113-172.17.0.2-1731308419534:blk_1073741882_1066 to mirror 127.0.0.1:35845 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T07:00:58,180 WARN [Thread-1023 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1066 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35845 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:58,180 WARN [Thread-1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741882_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34539,DS-838013a3-793d-45af-bf61-73d95dc42912,DISK], DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK]) is bad. 2024-11-11T07:00:58,180 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:60526 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741882_1066] {}] datanode.BlockReceiver(316): Block 1073741882 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T07:00:58,180 WARN [Thread-1023 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741882_1066 2024-11-11T07:00:58,180 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:60526 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741882_1066] {}] datanode.DataXceiver(331): 127.0.0.1:34539:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60526 dst: /127.0.0.1:34539 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T07:00:58,181 WARN [Thread-1023 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK] 2024-11-11T07:00:58,183 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T07:00:58,184 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T07:00:58,184 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T07:00:58,184 DEBUG [Time-limited 
test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:00:58,184 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:00:58,184 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T07:00:58,184 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=984390422, stopped=false 2024-11-11T07:00:58,184 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-11T07:00:58,184 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=f334e8b0fbdf,40641,1731308420347 2024-11-11T07:00:58,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42847-0x10105d3e7790002, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T07:00:58,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45321-0x10105d3e7790001, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T07:00:58,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42847-0x10105d3e7790002, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:58,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45321-0x10105d3e7790001, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:58,186 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T07:00:58,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34539 is added to blk_1073741883_1067 (size=11421) 2024-11-11T07:00:58,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T07:00:58,186 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-11T07:00:58,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:00:58,186 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at 
java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T07:00:58,187 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:00:58,187 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'f334e8b0fbdf,45321,1731308420401' ***** 2024-11-11T07:00:58,187 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T07:00:58,187 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'f334e8b0fbdf,42847,1731308421347' ***** 2024-11-11T07:00:58,187 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T07:00:58,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741883_1067 (size=11421) 2024-11-11T07:00:58,187 INFO [RS:1;f334e8b0fbdf:42847 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T07:00:58,187 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T07:00:58,187 INFO [RS:0;f334e8b0fbdf:45321 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T07:00:58,187 INFO [RS:1;f334e8b0fbdf:42847 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T07:00:58,187 INFO [RS:1;f334e8b0fbdf:42847 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T07:00:58,187 INFO [RS:1;f334e8b0fbdf:42847 {}] regionserver.HRegionServer(959): stopping server f334e8b0fbdf,42847,1731308421347 2024-11-11T07:00:58,187 INFO [RS:1;f334e8b0fbdf:42847 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T07:00:58,188 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:00:58,188 INFO [RS:1;f334e8b0fbdf:42847 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;f334e8b0fbdf:42847. 
2024-11-11T07:00:58,188 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42847-0x10105d3e7790002, quorum=127.0.0.1:50876, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:00:58,188 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/.tmp/info/b5dcbf30717a4ba2875c5eef99d5638a 2024-11-11T07:00:58,188 DEBUG [RS:1;f334e8b0fbdf:42847 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T07:00:58,188 DEBUG [RS:1;f334e8b0fbdf:42847 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:00:58,188 INFO [RS:1;f334e8b0fbdf:42847 {}] regionserver.HRegionServer(976): stopping server f334e8b0fbdf,42847,1731308421347; all regions closed. 2024-11-11T07:00:58,188 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45321-0x10105d3e7790001, quorum=127.0.0.1:50876, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:00:58,188 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:58,188 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:58,189 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:58,189 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:58,189 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:58,190 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:58,190 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:58,190 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 2024-11-11T07:00:58,190 WARN [IPC Server handler 1 on default port 41533 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 has not been closed. Lease recovery is in progress. 
RecoveryId = 1068 for block blk_1073741837_1013 2024-11-11T07:00:58,191 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 after 0ms 2024-11-11T07:00:58,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/.tmp/info/b5dcbf30717a4ba2875c5eef99d5638a as hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/b5dcbf30717a4ba2875c5eef99d5638a 2024-11-11T07:00:58,200 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/b5dcbf30717a4ba2875c5eef99d5638a, entries=6, sequenceid=55, filesize=11.2 K 2024-11-11T07:00:58,201 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=5.25 KB/5380 for b2b5aa4a37c19b1f701e750c2db983e3 in 29ms, sequenceid=55, compaction requested=true 2024-11-11T07:00:58,201 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b2b5aa4a37c19b1f701e750c2db983e3: 2024-11-11T07:00:58,201 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-11-11T07:00:58,201 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T07:00:58,201 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/3b80c36e62fb4cd9af926fa86f4f8366 because midkey is the same as first or last row 2024-11-11T07:00:58,201 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T07:00:58,201 INFO [RS:0;f334e8b0fbdf:45321 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T07:00:58,202 INFO [RS:0;f334e8b0fbdf:45321 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T07:00:58,202 INFO [RS:0;f334e8b0fbdf:45321 {}] regionserver.HRegionServer(3091): Received CLOSE for b2b5aa4a37c19b1f701e750c2db983e3 2024-11-11T07:00:58,202 INFO [RS:0;f334e8b0fbdf:45321 {}] regionserver.HRegionServer(959): stopping server f334e8b0fbdf,45321,1731308420401 2024-11-11T07:00:58,202 INFO [RS:0;f334e8b0fbdf:45321 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T07:00:58,202 INFO [RS:0;f334e8b0fbdf:45321 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;f334e8b0fbdf:45321. 
2024-11-11T07:00:58,202 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b2b5aa4a37c19b1f701e750c2db983e3, disabling compactions & flushes 2024-11-11T07:00:58,202 DEBUG [RS:0;f334e8b0fbdf:45321 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T07:00:58,202 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3. 2024-11-11T07:00:58,202 DEBUG [RS:0;f334e8b0fbdf:45321 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:00:58,202 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3. 2024-11-11T07:00:58,202 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3. after waiting 0 ms 2024-11-11T07:00:58,202 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3. 2024-11-11T07:00:58,202 INFO [RS:0;f334e8b0fbdf:45321 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T07:00:58,202 INFO [RS:0;f334e8b0fbdf:45321 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T07:00:58,202 INFO [RS:0;f334e8b0fbdf:45321 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-11T07:00:58,202 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing b2b5aa4a37c19b1f701e750c2db983e3 1/1 column families, dataSize=5.25 KB heapSize=5.88 KB 2024-11-11T07:00:58,202 INFO [RS:0;f334e8b0fbdf:45321 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T07:00:58,203 INFO [RS:0;f334e8b0fbdf:45321 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-11T07:00:58,203 DEBUG [RS:0;f334e8b0fbdf:45321 {}] regionserver.HRegionServer(1325): Online Regions={b2b5aa4a37c19b1f701e750c2db983e3=TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3., 1588230740=hbase:meta,,1.1588230740} 2024-11-11T07:00:58,203 DEBUG [RS:0;f334e8b0fbdf:45321 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, b2b5aa4a37c19b1f701e750c2db983e3 2024-11-11T07:00:58,203 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T07:00:58,203 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T07:00:58,203 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T07:00:58,203 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T07:00:58,203 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T07:00:58,203 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-11T07:00:58,203 ERROR [FSHLog-0-hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842-prefix:f334e8b0fbdf,45321,1731308420401.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:58,204 WARN [FSHLog-0-hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842-prefix:f334e8b0fbdf,45321,1731308420401.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:58,204 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f334e8b0fbdf%2C45321%2C1731308420401.meta:.meta(num 1731308421182) roll requested 2024-11-11T07:00:58,204 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308458204.meta 2024-11-11T07:00:58,210 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/.tmp/info/2c267792baa64efdb5584709e680bca5 is 1080, key is row0018/info:/1731308458173/Put/seqid=0 2024-11-11T07:00:58,211 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:58,212 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:58,212 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:58,212 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:58,212 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:58,212 WARN [Thread-1033 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1070 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35845 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:58,212 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:45570 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741885_1070] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='12e56a40-da9e-448c-8b67-5dbfa0a7e9db', xmitsInProgress=0}:Exception transferring block BP-1526976113-172.17.0.2-1731308419534:blk_1073741885_1070 to mirror 127.0.0.1:35845 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:58,212 WARN [Thread-1033 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741885_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-31f412c9-cb6c-41c5-b526-09a900da6299,DISK], DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK]) is bad. 2024-11-11T07:00:58,212 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308458204.meta 2024-11-11T07:00:58,212 WARN [Thread-1033 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741885_1070 2024-11-11T07:00:58,212 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:45570 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741885_1070] {}] datanode.BlockReceiver(316): Block 1073741885 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T07:00:58,212 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:45570 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741885_1070] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45570 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:58,212 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:58,213 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41209,DS-ee7401ad-9fdf-497c-b3b3-49bb6a922e81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:58,213 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta 2024-11-11T07:00:58,213 WARN [Thread-1033 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK] 2024-11-11T07:00:58,213 WARN [IPC Server handler 1 on default port 41533 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1071 for block blk_1073741834_1017 2024-11-11T07:00:58,213 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34315:34315),(127.0.0.1/127.0.0.1:42483:42483)] 2024-11-11T07:00:58,213 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta after 0ms 2024-11-11T07:00:58,213 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta is not closed yet, will try archiving it next time 2024-11-11T07:00:58,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741886_1072 (size=10347) 2024-11-11T07:00:58,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34539 is added to blk_1073741886_1072 (size=10347) 2024-11-11T07:00:58,219 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.25 KB at sequenceid=63 (bloomFilter=true), to=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/.tmp/info/2c267792baa64efdb5584709e680bca5 2024-11-11T07:00:58,226 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/.tmp/info/2c267792baa64efdb5584709e680bca5 as hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/2c267792baa64efdb5584709e680bca5 2024-11-11T07:00:58,232 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/2c267792baa64efdb5584709e680bca5, entries=5, sequenceid=63, filesize=10.1 K 2024-11-11T07:00:58,232 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/hbase/meta/1588230740/.tmp/info/9afd09850d1e42849ad9b444202e01eb is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3./info:regioninfo/1731308421788/Put/seqid=0 2024-11-11T07:00:58,233 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for b2b5aa4a37c19b1f701e750c2db983e3 in 31ms, sequenceid=63, compaction requested=true 2024-11-11T07:00:58,234 DEBUG 
[StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/e20cbf4bedba4069a3746dd249c38d3c, hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/09182c896d974f8aa50a53f66286a2ba, hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/b9b730e0f781472ea1e8af89af8e9672] to archive 2024-11-11T07:00:58,235 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-11T07:00:58,237 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/e20cbf4bedba4069a3746dd249c38d3c to hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/e20cbf4bedba4069a3746dd249c38d3c 2024-11-11T07:00:58,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741887_1073 (size=7089) 2024-11-11T07:00:58,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34539 is added to blk_1073741887_1073 (size=7089) 2024-11-11T07:00:58,238 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/hbase/meta/1588230740/.tmp/info/9afd09850d1e42849ad9b444202e01eb 2024-11-11T07:00:58,239 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/09182c896d974f8aa50a53f66286a2ba to hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/09182c896d974f8aa50a53f66286a2ba 2024-11-11T07:00:58,240 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/b9b730e0f781472ea1e8af89af8e9672 to 
hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/info/b9b730e0f781472ea1e8af89af8e9672 2024-11-11T07:00:58,240 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=f334e8b0fbdf:40641 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] 
at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-11T07:00:58,241 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [e20cbf4bedba4069a3746dd249c38d3c=10347, 09182c896d974f8aa50a53f66286a2ba=12506, b9b730e0f781472ea1e8af89af8e9672=6027] 2024-11-11T07:00:58,245 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b2b5aa4a37c19b1f701e750c2db983e3/recovered.edits/66.seqid, newMaxSeqId=66, maxSeqId=1 2024-11-11T07:00:58,245 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3. 2024-11-11T07:00:58,245 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b2b5aa4a37c19b1f701e750c2db983e3: Waiting for close lock at 1731308458202Running coprocessor pre-close hooks at 1731308458202Disabling compacts and flushes for region at 1731308458202Disabling writes for close at 1731308458202Obtaining lock to block concurrent updates at 1731308458202Preparing flush snapshotting stores in b2b5aa4a37c19b1f701e750c2db983e3 at 1731308458202Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3., syncing WAL and waiting on mvcc, flushsize=dataSize=5380, getHeapSize=6000, getOffHeapSize=0, getCellsCount=5 at 1731308458203 (+1 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3. at 1731308458203Flushing b2b5aa4a37c19b1f701e750c2db983e3/info: creating writer at 1731308458204 (+1 ms)Flushing b2b5aa4a37c19b1f701e750c2db983e3/info: appending metadata at 1731308458209 (+5 ms)Flushing b2b5aa4a37c19b1f701e750c2db983e3/info: closing flushed file at 1731308458209Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d6aef58: reopening flushed file at 1731308458225 (+16 ms)Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for b2b5aa4a37c19b1f701e750c2db983e3 in 31ms, sequenceid=63, compaction requested=true at 1731308458233 (+8 ms)Writing region close event to WAL at 1731308458241 (+8 ms)Running coprocessor post-close hooks at 1731308458245 (+4 ms)Closed at 1731308458245 2024-11-11T07:00:58,246 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731308421429.b2b5aa4a37c19b1f701e750c2db983e3. 
2024-11-11T07:00:58,259 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/hbase/meta/1588230740/.tmp/ns/533d6af7a52b45669afa08253260de35 is 43, key is default/ns:d/1731308421227/Put/seqid=0 2024-11-11T07:00:58,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741888_1074 (size=5153) 2024-11-11T07:00:58,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34539 is added to blk_1073741888_1074 (size=5153) 2024-11-11T07:00:58,265 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/hbase/meta/1588230740/.tmp/ns/533d6af7a52b45669afa08253260de35 2024-11-11T07:00:58,286 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/hbase/meta/1588230740/.tmp/table/e51c396e74af45f3b5e13bcca5158c72 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731308421799/Put/seqid=0 2024-11-11T07:00:58,288 WARN [Thread-1058 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1075 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35845 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:00:58,288 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:45610 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741889_1075] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='12e56a40-da9e-448c-8b67-5dbfa0a7e9db', xmitsInProgress=0}:Exception transferring block BP-1526976113-172.17.0.2-1731308419534:blk_1073741889_1075 to mirror 127.0.0.1:35845 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:00:58,289 WARN [Thread-1058 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1526976113-172.17.0.2-1731308419534:blk_1073741889_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-31f412c9-cb6c-41c5-b526-09a900da6299,DISK], DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK]) is bad. 2024-11-11T07:00:58,289 WARN [Thread-1058 {}] hdfs.DataStreamer(1850): Abandoning BP-1526976113-172.17.0.2-1731308419534:blk_1073741889_1075 2024-11-11T07:00:58,289 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:45610 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741889_1075] {}] datanode.BlockReceiver(316): Block 1073741889 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T07:00:58,289 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-496239605_22 at /127.0.0.1:45610 [Receiving block BP-1526976113-172.17.0.2-1731308419534:blk_1073741889_1075] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45610 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T07:00:58,289 WARN [Thread-1058 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35845,DS-7065d851-b326-4459-9ca9-fab9b603bb46,DISK] 2024-11-11T07:00:58,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741890_1076 (size=5424) 2024-11-11T07:00:58,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34539 is added to blk_1073741890_1076 (size=5424) 2024-11-11T07:00:58,294 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/hbase/meta/1588230740/.tmp/table/e51c396e74af45f3b5e13bcca5158c72 2024-11-11T07:00:58,300 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/hbase/meta/1588230740/.tmp/info/9afd09850d1e42849ad9b444202e01eb as hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/hbase/meta/1588230740/info/9afd09850d1e42849ad9b444202e01eb 2024-11-11T07:00:58,306 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/hbase/meta/1588230740/info/9afd09850d1e42849ad9b444202e01eb, entries=10, sequenceid=11, filesize=6.9 K 2024-11-11T07:00:58,307 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/hbase/meta/1588230740/.tmp/ns/533d6af7a52b45669afa08253260de35 as hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/hbase/meta/1588230740/ns/533d6af7a52b45669afa08253260de35 2024-11-11T07:00:58,312 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/hbase/meta/1588230740/ns/533d6af7a52b45669afa08253260de35, entries=2, sequenceid=11, filesize=5.0 K 2024-11-11T07:00:58,313 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/hbase/meta/1588230740/.tmp/table/e51c396e74af45f3b5e13bcca5158c72 as hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/hbase/meta/1588230740/table/e51c396e74af45f3b5e13bcca5158c72 2024-11-11T07:00:58,318 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/hbase/meta/1588230740/table/e51c396e74af45f3b5e13bcca5158c72, entries=2, sequenceid=11, filesize=5.3 K 2024-11-11T07:00:58,319 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 116ms, sequenceid=11, 
compaction requested=false 2024-11-11T07:00:58,324 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-11T07:00:58,324 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T07:00:58,325 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T07:00:58,325 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731308458203Running coprocessor pre-close hooks at 1731308458203Disabling compacts and flushes for region at 1731308458203Disabling writes for close at 1731308458203Obtaining lock to block concurrent updates at 1731308458203Preparing flush snapshotting stores in 1588230740 at 1731308458203Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731308458203Flushing stores of hbase:meta,,1.1588230740 at 1731308458214 (+11 ms)Flushing 1588230740/info: creating writer at 1731308458214Flushing 1588230740/info: appending metadata at 1731308458232 (+18 ms)Flushing 1588230740/info: closing flushed file at 1731308458232Flushing 1588230740/ns: creating writer at 1731308458244 (+12 ms)Flushing 1588230740/ns: appending metadata at 1731308458259 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731308458259Flushing 1588230740/table: creating writer at 1731308458271 (+12 ms)Flushing 1588230740/table: appending metadata at 1731308458285 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731308458285Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2fe23dd4: reopening flushed file at 1731308458299 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3d12aa6e: reopening flushed file at 1731308458306 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@10c51340: reopening flushed file at 1731308458312 (+6 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 116ms, sequenceid=11, compaction requested=false at 1731308458319 (+7 ms)Writing region close event to WAL at 1731308458321 (+2 ms)Running coprocessor post-close hooks at 1731308458324 (+3 ms)Closed at 1731308458325 (+1 ms) 2024-11-11T07:00:58,325 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T07:00:58,398 INFO [regionserver/f334e8b0fbdf:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T07:00:58,398 INFO [regionserver/f334e8b0fbdf:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T07:00:58,403 INFO [RS:0;f334e8b0fbdf:45321 {}] regionserver.HRegionServer(976): stopping server f334e8b0fbdf,45321,1731308420401; all regions closed. 
2024-11-11T07:00:58,403 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:58,404 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:58,404 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:58,404 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:58,404 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:00:58,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34539 is added to blk_1073741884_1069 (size=825) 2024-11-11T07:00:58,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741884_1069 (size=825) 2024-11-11T07:00:58,564 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 is not closed yet, will try archiving it next time 2024-11-11T07:00:58,564 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308440145 to hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/oldWALs/f334e8b0fbdf%2C45321%2C1731308420401.1731308440145 2024-11-11T07:00:58,657 INFO [regionserver/f334e8b0fbdf:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T07:00:58,693 INFO [regionserver/f334e8b0fbdf:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T07:00:58,693 INFO [regionserver/f334e8b0fbdf:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T07:00:59,398 INFO [regionserver/f334e8b0fbdf:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T07:00:59,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741827_1003 (size=196) 2024-11-11T07:00:59,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741829_1005 (size=34) 2024-11-11T07:00:59,736 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@275a21e1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46381, datanodeUuid=12e56a40-da9e-448c-8b67-5dbfa0a7e9db, infoPort=34315, infoSecurePort=0, ipcPort=40021, storageInfo=lv=-57;cid=testClusterID;nsid=896387497;c=1731308419534):Failed to transfer BP-1526976113-172.17.0.2-1731308419534:blk_1073741861_1044 to 127.0.0.1:35845 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:01:00,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741825_1001 (size=7) 2024-11-11T07:01:01,321 INFO [master/f334e8b0fbdf:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-11T07:01:01,321 INFO [master/f334e8b0fbdf:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-11T07:01:01,447 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@10b2f26e {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1526976113-172.17.0.2-1731308419534:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:41209,null,null]) java.net.ConnectException: Call From f334e8b0fbdf/172.17.0.2 to localhost:33097 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-11T07:01:01,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34539 is added to blk_1073741830_1063 (size=27306) 2024-11-11T07:01:01,605 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-11T07:01:01,606 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T07:01:01,606 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-11T07:01:02,191 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 after 4001ms 2024-11-11T07:01:02,214 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta after 4001ms 2024-11-11T07:01:02,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741835_1011 (size=393) 2024-11-11T07:01:02,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741831_1007 (size=1321) 2024-11-11T07:01:03,190 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-11T07:01:03,192 DEBUG [RS:1;f334e8b0fbdf:42847 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/oldWALs 2024-11-11T07:01:03,192 INFO [RS:1;f334e8b0fbdf:42847 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f334e8b0fbdf%2C42847%2C1731308421347:(num 1731308421530) 2024-11-11T07:01:03,192 DEBUG [RS:1;f334e8b0fbdf:42847 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:01:03,192 INFO [RS:1;f334e8b0fbdf:42847 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T07:01:03,192 INFO [RS:1;f334e8b0fbdf:42847 {}] 
hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T07:01:03,192 INFO [RS:1;f334e8b0fbdf:42847 {}] hbase.ChoreService(370): Chore service for: regionserver/f334e8b0fbdf:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-11T07:01:03,193 INFO [RS:1;f334e8b0fbdf:42847 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T07:01:03,193 INFO [RS:1;f334e8b0fbdf:42847 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T07:01:03,193 INFO [RS:1;f334e8b0fbdf:42847 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T07:01:03,193 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T07:01:03,193 INFO [RS:1;f334e8b0fbdf:42847 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T07:01:03,193 INFO [RS:1;f334e8b0fbdf:42847 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42847 2024-11-11T07:01:03,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T07:01:03,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42847-0x10105d3e7790002, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f334e8b0fbdf,42847,1731308421347 2024-11-11T07:01:03,195 INFO [RS:1;f334e8b0fbdf:42847 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T07:01:03,197 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f334e8b0fbdf,42847,1731308421347] 2024-11-11T07:01:03,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:03,203 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f334e8b0fbdf,42847,1731308421347 already deleted, retry=false 2024-11-11T07:01:03,203 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f334e8b0fbdf,42847,1731308421347 expired; onlineServers=1 2024-11-11T07:01:03,246 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:03,260 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:03,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:03,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:03,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:03,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:03,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:03,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:03,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42847-0x10105d3e7790002, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:01:03,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42847-0x10105d3e7790002, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:01:03,297 INFO [RS:1;f334e8b0fbdf:42847 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T07:01:03,297 INFO [RS:1;f334e8b0fbdf:42847 {}] regionserver.HRegionServer(1031): Exiting; stopping=f334e8b0fbdf,42847,1731308421347; zookeeper connection closed. 2024-11-11T07:01:03,297 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6c252aab {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6c252aab 2024-11-11T07:01:03,404 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-11T07:01:03,408 DEBUG [RS:0;f334e8b0fbdf:45321 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/oldWALs 2024-11-11T07:01:03,408 INFO [RS:0;f334e8b0fbdf:45321 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f334e8b0fbdf%2C45321%2C1731308420401.meta:.meta(num 1731308458204) 2024-11-11T07:01:03,408 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:03,408 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:03,409 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:03,409 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:03,409 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:03,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741881_1065 (size=13514) 2024-11-11T07:01:03,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34539 is added to blk_1073741881_1065 (size=13514) 2024-11-11T07:01:03,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741830_1063 (size=27306) 2024-11-11T07:01:03,771 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T07:01:03,787 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:03,787 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:03,788 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:03,788 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:03,788 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:03,792 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:03,792 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:03,794 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:04,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:04,214 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.nio.channels.ClosedChannelException: null at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?] 2024-11-11T07:01:04,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:05,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:05,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:06,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:06,214 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.nio.channels.ClosedChannelException: null at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?] 2024-11-11T07:01:06,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:07,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:07,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:08,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:08,214 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.FSHLog(580): java.nio.channels.ClosedChannelException: null at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?] 2024-11-11T07:01:08,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:08,409 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-11T07:01:08,412 DEBUG [RS:0;f334e8b0fbdf:45321 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/oldWALs 2024-11-11T07:01:08,413 INFO [RS:0;f334e8b0fbdf:45321 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f334e8b0fbdf%2C45321%2C1731308420401:(num 1731308458152) 2024-11-11T07:01:08,413 DEBUG [RS:0;f334e8b0fbdf:45321 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:01:08,413 INFO [RS:0;f334e8b0fbdf:45321 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T07:01:08,413 INFO [RS:0;f334e8b0fbdf:45321 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T07:01:08,413 INFO [RS:0;f334e8b0fbdf:45321 {}] hbase.ChoreService(370): Chore service for: regionserver/f334e8b0fbdf:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T07:01:08,413 INFO [RS:0;f334e8b0fbdf:45321 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T07:01:08,413 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T07:01:08,413 INFO [RS:0;f334e8b0fbdf:45321 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45321
2024-11-11T07:01:08,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45321-0x10105d3e7790001, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f334e8b0fbdf,45321,1731308420401
2024-11-11T07:01:08,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-11T07:01:08,416 INFO [RS:0;f334e8b0fbdf:45321 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-11T07:01:08,417 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f334e8b0fbdf,45321,1731308420401]
2024-11-11T07:01:08,418 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f334e8b0fbdf,45321,1731308420401 already deleted, retry=false
2024-11-11T07:01:08,418 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f334e8b0fbdf,45321,1731308420401 expired; onlineServers=0
2024-11-11T07:01:08,418 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'f334e8b0fbdf,40641,1731308420347' *****
2024-11-11T07:01:08,418 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-11T07:01:08,418 INFO [M:0;f334e8b0fbdf:40641 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-11T07:01:08,418 INFO [M:0;f334e8b0fbdf:40641 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-11T07:01:08,418 DEBUG [M:0;f334e8b0fbdf:40641 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-11T07:01:08,419 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-11-11T07:01:08,419 DEBUG [M:0;f334e8b0fbdf:40641 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-11T07:01:08,419 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.small.0-1731308420572 {}] cleaner.HFileCleaner(306): Exit Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.small.0-1731308420572,5,FailOnTimeoutGroup]
2024-11-11T07:01:08,419 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.large.0-1731308420572 {}] cleaner.HFileCleaner(306): Exit Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.large.0-1731308420572,5,FailOnTimeoutGroup]
2024-11-11T07:01:08,419 INFO [M:0;f334e8b0fbdf:40641 {}] hbase.ChoreService(370): Chore service for: master/f334e8b0fbdf:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-11-11T07:01:08,419 INFO [M:0;f334e8b0fbdf:40641 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-11T07:01:08,419 DEBUG [M:0;f334e8b0fbdf:40641 {}] master.HMaster(1795): Stopping service threads
2024-11-11T07:01:08,419 INFO [M:0;f334e8b0fbdf:40641 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-11T07:01:08,419 INFO [M:0;f334e8b0fbdf:40641 {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-11T07:01:08,420 INFO [M:0;f334e8b0fbdf:40641 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-11T07:01:08,420 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-11T07:01:08,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-11T07:01:08,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-11T07:01:08,421 DEBUG [M:0;f334e8b0fbdf:40641 {}] zookeeper.ZKUtil(347): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-11T07:01:08,421 WARN [M:0;f334e8b0fbdf:40641 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-11T07:01:08,422 INFO [M:0;f334e8b0fbdf:40641 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/.lastflushedseqids
2024-11-11T07:01:08,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741891_1077 (size=130)
2024-11-11T07:01:08,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34539 is added to blk_1073741891_1077 (size=130)
2024-11-11T07:01:08,429 INFO [M:0;f334e8b0fbdf:40641 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-11-11T07:01:08,429 INFO [M:0;f334e8b0fbdf:40641 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-11T07:01:08,429 DEBUG [M:0;f334e8b0fbdf:40641 {}] regionserver.HRegion(1722): Closing
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T07:01:08,429 INFO [M:0;f334e8b0fbdf:40641 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:01:08,429 DEBUG [M:0;f334e8b0fbdf:40641 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:01:08,429 DEBUG [M:0;f334e8b0fbdf:40641 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T07:01:08,429 DEBUG [M:0;f334e8b0fbdf:40641 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:01:08,429 INFO [M:0;f334e8b0fbdf:40641 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-11T07:01:08,447 DEBUG [M:0;f334e8b0fbdf:40641 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a395cf27a2f949bfb9361e21e0e69c03 is 82, key is hbase:meta,,1/info:regioninfo/1731308421210/Put/seqid=0 2024-11-11T07:01:08,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34539 is added to blk_1073741892_1078 (size=5672) 2024-11-11T07:01:08,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741892_1078 (size=5672) 2024-11-11T07:01:08,453 INFO [M:0;f334e8b0fbdf:40641 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a395cf27a2f949bfb9361e21e0e69c03 2024-11-11T07:01:08,474 DEBUG [M:0;f334e8b0fbdf:40641 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1cb7dd52a2624b84a3ecaff5c6ca6346 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731308421804/Put/seqid=0 2024-11-11T07:01:08,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34539 is added to blk_1073741893_1079 (size=6255) 2024-11-11T07:01:08,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741893_1079 (size=6255) 2024-11-11T07:01:08,479 INFO [M:0;f334e8b0fbdf:40641 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1cb7dd52a2624b84a3ecaff5c6ca6346 2024-11-11T07:01:08,484 INFO [M:0;f334e8b0fbdf:40641 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1cb7dd52a2624b84a3ecaff5c6ca6346 2024-11-11T07:01:08,498 DEBUG [M:0;f334e8b0fbdf:40641 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fbc0ac1a7be74d03a22108f56a6e084f is 69, key is f334e8b0fbdf,42847,1731308421347/rs:state/1731308421382/Put/seqid=0 2024-11-11T07:01:08,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741894_1080 (size=5224) 2024-11-11T07:01:08,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34539 is added to blk_1073741894_1080 (size=5224) 2024-11-11T07:01:08,503 INFO [M:0;f334e8b0fbdf:40641 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fbc0ac1a7be74d03a22108f56a6e084f 2024-11-11T07:01:08,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45321-0x10105d3e7790001, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:01:08,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45321-0x10105d3e7790001, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:01:08,517 INFO [RS:0;f334e8b0fbdf:45321 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T07:01:08,517 INFO [RS:0;f334e8b0fbdf:45321 {}] regionserver.HRegionServer(1031): Exiting; stopping=f334e8b0fbdf,45321,1731308420401; zookeeper connection closed. 2024-11-11T07:01:08,517 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1558d468 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1558d468 2024-11-11T07:01:08,517 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-11T07:01:08,522 DEBUG [M:0;f334e8b0fbdf:40641 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/769c3bf7c35d4cffb1b3d4efda8207a9 is 52, key is load_balancer_on/state:d/1731308421331/Put/seqid=0 2024-11-11T07:01:08,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34539 is added to blk_1073741895_1081 (size=5056) 2024-11-11T07:01:08,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741895_1081 (size=5056) 2024-11-11T07:01:08,527 INFO [M:0;f334e8b0fbdf:40641 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/769c3bf7c35d4cffb1b3d4efda8207a9 2024-11-11T07:01:08,533 DEBUG [M:0;f334e8b0fbdf:40641 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a395cf27a2f949bfb9361e21e0e69c03 as 
hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a395cf27a2f949bfb9361e21e0e69c03 2024-11-11T07:01:08,538 INFO [M:0;f334e8b0fbdf:40641 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a395cf27a2f949bfb9361e21e0e69c03, entries=8, sequenceid=60, filesize=5.5 K 2024-11-11T07:01:08,539 DEBUG [M:0;f334e8b0fbdf:40641 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1cb7dd52a2624b84a3ecaff5c6ca6346 as hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1cb7dd52a2624b84a3ecaff5c6ca6346 2024-11-11T07:01:08,544 INFO [M:0;f334e8b0fbdf:40641 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1cb7dd52a2624b84a3ecaff5c6ca6346 2024-11-11T07:01:08,545 INFO [M:0;f334e8b0fbdf:40641 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1cb7dd52a2624b84a3ecaff5c6ca6346, entries=6, sequenceid=60, filesize=6.1 K 2024-11-11T07:01:08,546 DEBUG [M:0;f334e8b0fbdf:40641 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fbc0ac1a7be74d03a22108f56a6e084f as hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fbc0ac1a7be74d03a22108f56a6e084f 2024-11-11T07:01:08,551 INFO [M:0;f334e8b0fbdf:40641 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fbc0ac1a7be74d03a22108f56a6e084f, entries=2, sequenceid=60, filesize=5.1 K 2024-11-11T07:01:08,552 DEBUG [M:0;f334e8b0fbdf:40641 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/769c3bf7c35d4cffb1b3d4efda8207a9 as hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/769c3bf7c35d4cffb1b3d4efda8207a9 2024-11-11T07:01:08,556 INFO [M:0;f334e8b0fbdf:40641 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/769c3bf7c35d4cffb1b3d4efda8207a9, entries=1, sequenceid=60, filesize=4.9 K 2024-11-11T07:01:08,557 INFO [M:0;f334e8b0fbdf:40641 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=60, compaction requested=false 2024-11-11T07:01:08,559 INFO [M:0;f334e8b0fbdf:40641 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-11T07:01:08,559 DEBUG [M:0;f334e8b0fbdf:40641 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731308468429Disabling compacts and flushes for region at 1731308468429Disabling writes for close at 1731308468429Obtaining lock to block concurrent updates at 1731308468429Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731308468429Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1731308468430 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731308468430Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731308468430Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731308468446 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731308468446Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731308468458 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731308468473 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731308468473Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731308468484 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731308468498 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731308468498Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731308468508 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731308468522 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731308468522Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@abf5e48: reopening flushed file at 1731308468533 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6563026f: reopening flushed file at 1731308468538 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b7497f5: reopening flushed file at 1731308468545 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@68dfbb7a: reopening flushed file at 1731308468551 (+6 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=60, compaction requested=false at 1731308468557 (+6 ms)Writing region close event to WAL at 1731308468559 (+2 ms)Closed at 1731308468559 2024-11-11T07:01:08,559 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:08,559 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:08,560 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:08,560 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:08,560 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:08,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741879_1062 (size=1045) 2024-11-11T07:01:08,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34539 is added to blk_1073741879_1062 (size=1045) 2024-11-11T07:01:08,562 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T07:01:08,562 INFO [M:0;f334e8b0fbdf:40641 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-11T07:01:08,562 INFO [M:0;f334e8b0fbdf:40641 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40641
2024-11-11T07:01:08,563 INFO [M:0;f334e8b0fbdf:40641 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-11T07:01:08,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T07:01:08,665 INFO [M:0;f334e8b0fbdf:40641 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-11T07:01:08,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40641-0x10105d3e7790000, quorum=127.0.0.1:50876, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T07:01:08,667 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2aaad1eb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T07:01:08,668 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@41b6b906{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T07:01:08,668 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T07:01:08,668 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c92a539{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T07:01:08,668 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2dee3ed2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/hadoop.log.dir/,STOPPED}
2024-11-11T07:01:08,669 WARN [BP-1526976113-172.17.0.2-1731308419534 heartbeating to localhost/127.0.0.1:41533 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-11T07:01:08,669 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-11T07:01:08,669 WARN [BP-1526976113-172.17.0.2-1731308419534 heartbeating to localhost/127.0.0.1:41533 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1526976113-172.17.0.2-1731308419534 (Datanode Uuid 85195dfd-4a83-4b20-9c58-2eee99c5bac8) service to localhost/127.0.0.1:41533
2024-11-11T07:01:08,669 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-11T07:01:08,669 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@709d0d21 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1526976113-172.17.0.2-1731308419534:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:41209,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:33097 , LocalHost:localPort f334e8b0fbdf/172.17.0.2:0.
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-11T07:01:08,670 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@709d0d21 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1526976113-172.17.0.2-1731308419534:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:34539,null,null], DatanodeInfoWithStorage[127.0.0.1:41209,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-1526976113-172.17.0.2-1731308419534 2024-11-11T07:01:08,670 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data3/current/BP-1526976113-172.17.0.2-1731308419534 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:01:08,670 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data4/current/BP-1526976113-172.17.0.2-1731308419534 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:01:08,671 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T07:01:08,673 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@65266587{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:01:08,673 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2e157477{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:01:08,673 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:01:08,673 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@132d95f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:01:08,673 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75255721{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/hadoop.log.dir/,STOPPED} 2024-11-11T07:01:08,675 WARN [BP-1526976113-172.17.0.2-1731308419534 heartbeating to localhost/127.0.0.1:41533 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 
2024-11-11T07:01:08,675 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-11T07:01:08,675 WARN [BP-1526976113-172.17.0.2-1731308419534 heartbeating to localhost/127.0.0.1:41533 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1526976113-172.17.0.2-1731308419534 (Datanode Uuid 12e56a40-da9e-448c-8b67-5dbfa0a7e9db) service to localhost/127.0.0.1:41533
2024-11-11T07:01:08,675 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-11T07:01:08,676 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data5/current/BP-1526976113-172.17.0.2-1731308419534 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T07:01:08,676 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/cluster_21d1161f-8192-83d8-8044-495357a0c439/data/data6/current/BP-1526976113-172.17.0.2-1731308419534 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T07:01:08,676 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-11T07:01:08,682 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7cd2a640{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-11T07:01:08,682 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@64b7b556{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T07:01:08,682 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T07:01:08,682 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ed3a961{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T07:01:08,683 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a488aac{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/hadoop.log.dir/,STOPPED}
2024-11-11T07:01:08,690 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-11T07:01:08,731 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-11T07:01:08,738 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=155 (was 78) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96)
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007fbeccbf46e8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41533 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:41533 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41533 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007fbeccbf46e8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41533 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41533 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:45449 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007fbeccbf46e8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41533 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41533 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41533 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41533 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45449 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41533 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:41533 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=436 (was 404) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=31 (was 50), ProcessCount=11 (was 11), AvailableMemoryMB=9083 (was 9549) 2024-11-11T07:01:08,745 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=155, OpenFileDescriptor=436, MaxFileDescriptor=1048576, SystemLoadAverage=31, ProcessCount=11, AvailableMemoryMB=9082 2024-11-11T07:01:08,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T07:01:08,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/hadoop.log.dir so I do NOT create it in target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34 2024-11-11T07:01:08,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e02c6b19-c971-9c84-6546-e3eda2bfa349/hadoop.tmp.dir so I do NOT create it in target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34 2024-11-11T07:01:08,746 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/cluster_1896808d-55e0-067a-97b1-464055333fa7, deleteOnExit=true 2024-11-11T07:01:08,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T07:01:08,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/test.cache.data in system properties and HBase conf 2024-11-11T07:01:08,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T07:01:08,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/hadoop.log.dir in system properties and HBase conf 2024-11-11T07:01:08,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T07:01:08,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T07:01:08,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T07:01:08,747 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-11T07:01:08,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T07:01:08,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T07:01:08,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T07:01:08,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T07:01:08,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T07:01:08,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T07:01:08,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T07:01:08,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T07:01:08,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T07:01:08,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/nfs.dump.dir in system properties and HBase conf 2024-11-11T07:01:08,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/java.io.tmpdir in system properties and HBase conf 2024-11-11T07:01:08,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T07:01:08,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T07:01:08,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T07:01:08,760 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T07:01:08,826 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:01:08,833 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T07:01:08,834 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T07:01:08,834 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T07:01:08,834 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T07:01:08,835 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:01:08,835 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27a49013{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/hadoop.log.dir/,AVAILABLE} 2024-11-11T07:01:08,835 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@662aecf5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T07:01:08,950 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@347a2271{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/java.io.tmpdir/jetty-localhost-46815-hadoop-hdfs-3_4_1-tests_jar-_-any-2644500963838542687/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T07:01:08,951 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@464ea64c{HTTP/1.1, (http/1.1)}{localhost:46815} 2024-11-11T07:01:08,951 INFO [Time-limited test {}] server.Server(415): Started @152530ms 2024-11-11T07:01:08,965 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T07:01:09,021 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:01:09,024 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T07:01:09,025 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T07:01:09,025 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T07:01:09,025 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T07:01:09,026 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@165d0fad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/hadoop.log.dir/,AVAILABLE} 2024-11-11T07:01:09,026 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d6c2bfb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T07:01:09,125 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:09,141 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@109832d2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/java.io.tmpdir/jetty-localhost-39189-hadoop-hdfs-3_4_1-tests_jar-_-any-12750614780689867508/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:01:09,141 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5fec76d{HTTP/1.1, (http/1.1)}{localhost:39189} 2024-11-11T07:01:09,142 INFO [Time-limited test {}] server.Server(415): Started @152721ms 2024-11-11T07:01:09,143 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T07:01:09,173 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:01:09,177 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T07:01:09,177 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T07:01:09,178 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T07:01:09,178 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T07:01:09,178 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3fd7563{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/hadoop.log.dir/,AVAILABLE} 2024-11-11T07:01:09,179 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@471bebfc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T07:01:09,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:09,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:09,244 WARN [Thread-1181 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/cluster_1896808d-55e0-067a-97b1-464055333fa7/data/data1/current/BP-880503148-172.17.0.2-1731308468769/current, will proceed with Du for space computation calculation, 2024-11-11T07:01:09,244 WARN [Thread-1182 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/cluster_1896808d-55e0-067a-97b1-464055333fa7/data/data2/current/BP-880503148-172.17.0.2-1731308468769/current, will proceed with Du for space computation calculation, 2024-11-11T07:01:09,260 WARN [Thread-1160 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T07:01:09,263 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1088dcb5a6a78de3 with lease ID 0xaced1f65c36f4d25: Processing first storage report for DS-5f2f2062-f0de-4847-951f-d3ab6291782d from datanode DatanodeRegistration(127.0.0.1:45161, datanodeUuid=0a7f41fb-f647-4208-a734-53cac447cc83, infoPort=32895, infoSecurePort=0, ipcPort=34553, storageInfo=lv=-57;cid=testClusterID;nsid=476746548;c=1731308468769) 2024-11-11T07:01:09,263 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1088dcb5a6a78de3 with lease ID 0xaced1f65c36f4d25: from storage DS-5f2f2062-f0de-4847-951f-d3ab6291782d node DatanodeRegistration(127.0.0.1:45161, datanodeUuid=0a7f41fb-f647-4208-a734-53cac447cc83, infoPort=32895, infoSecurePort=0, ipcPort=34553, storageInfo=lv=-57;cid=testClusterID;nsid=476746548;c=1731308468769), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:01:09,263 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1088dcb5a6a78de3 with lease ID 0xaced1f65c36f4d25: Processing first storage report for DS-33da8be0-11b8-4944-8536-65e9ee1f4458 from datanode DatanodeRegistration(127.0.0.1:45161, datanodeUuid=0a7f41fb-f647-4208-a734-53cac447cc83, infoPort=32895, infoSecurePort=0, ipcPort=34553, storageInfo=lv=-57;cid=testClusterID;nsid=476746548;c=1731308468769) 2024-11-11T07:01:09,263 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1088dcb5a6a78de3 with lease ID 0xaced1f65c36f4d25: from storage DS-33da8be0-11b8-4944-8536-65e9ee1f4458 node DatanodeRegistration(127.0.0.1:45161, datanodeUuid=0a7f41fb-f647-4208-a734-53cac447cc83, infoPort=32895, infoSecurePort=0, ipcPort=34553, storageInfo=lv=-57;cid=testClusterID;nsid=476746548;c=1731308468769), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:01:09,296 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@2835f29c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/java.io.tmpdir/jetty-localhost-38039-hadoop-hdfs-3_4_1-tests_jar-_-any-8413345359939902038/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:01:09,296 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@14a79ae9{HTTP/1.1, (http/1.1)}{localhost:38039} 2024-11-11T07:01:09,296 INFO [Time-limited test {}] server.Server(415): Started @152876ms 2024-11-11T07:01:09,298 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T07:01:09,391 WARN [Thread-1207 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/cluster_1896808d-55e0-067a-97b1-464055333fa7/data/data3/current/BP-880503148-172.17.0.2-1731308468769/current, will proceed with Du for space computation calculation, 2024-11-11T07:01:09,391 WARN [Thread-1208 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/cluster_1896808d-55e0-067a-97b1-464055333fa7/data/data4/current/BP-880503148-172.17.0.2-1731308468769/current, will proceed with Du for space computation calculation, 2024-11-11T07:01:09,407 WARN [Thread-1196 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T07:01:09,409 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5d889d58306e9df with lease ID 0xaced1f65c36f4d26: Processing first storage report for DS-cc5f5bb1-4040-439b-aebf-559025392dc9 from datanode DatanodeRegistration(127.0.0.1:34231, datanodeUuid=8c75707b-f307-4efb-bc9b-19cfb4a5bb0c, infoPort=33209, infoSecurePort=0, ipcPort=35565, storageInfo=lv=-57;cid=testClusterID;nsid=476746548;c=1731308468769) 2024-11-11T07:01:09,409 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5d889d58306e9df with lease ID 0xaced1f65c36f4d26: from storage DS-cc5f5bb1-4040-439b-aebf-559025392dc9 node DatanodeRegistration(127.0.0.1:34231, datanodeUuid=8c75707b-f307-4efb-bc9b-19cfb4a5bb0c, infoPort=33209, infoSecurePort=0, ipcPort=35565, storageInfo=lv=-57;cid=testClusterID;nsid=476746548;c=1731308468769), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:01:09,410 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5d889d58306e9df with lease ID 0xaced1f65c36f4d26: Processing first storage report for DS-2b708c8b-f0c2-4169-b431-4db5714d5e16 from datanode DatanodeRegistration(127.0.0.1:34231, datanodeUuid=8c75707b-f307-4efb-bc9b-19cfb4a5bb0c, infoPort=33209, infoSecurePort=0, ipcPort=35565, storageInfo=lv=-57;cid=testClusterID;nsid=476746548;c=1731308468769) 2024-11-11T07:01:09,410 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5d889d58306e9df with lease ID 0xaced1f65c36f4d26: from storage DS-2b708c8b-f0c2-4169-b431-4db5714d5e16 node DatanodeRegistration(127.0.0.1:34231, datanodeUuid=8c75707b-f307-4efb-bc9b-19cfb4a5bb0c, infoPort=33209, infoSecurePort=0, ipcPort=35565, storageInfo=lv=-57;cid=testClusterID;nsid=476746548;c=1731308468769), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-11T07:01:09,420 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34 2024-11-11T07:01:09,423 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/cluster_1896808d-55e0-067a-97b1-464055333fa7/zookeeper_0, clientPort=56983, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/cluster_1896808d-55e0-067a-97b1-464055333fa7/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/cluster_1896808d-55e0-067a-97b1-464055333fa7/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T07:01:09,424 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56983 2024-11-11T07:01:09,424 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:01:09,426 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:01:09,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45161 is added to blk_1073741825_1001 (size=7) 2024-11-11T07:01:09,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34231 is added to blk_1073741825_1001 (size=7) 2024-11-11T07:01:09,437 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634 with version=8 2024-11-11T07:01:09,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/hbase-staging 2024-11-11T07:01:09,439 INFO [Time-limited test {}] client.ConnectionUtils(128): master/f334e8b0fbdf:0 server-side Connection retries=45 2024-11-11T07:01:09,439 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:01:09,440 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T07:01:09,440 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T07:01:09,440 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:01:09,440 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T07:01:09,440 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T07:01:09,440 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T07:01:09,441 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41265 2024-11-11T07:01:09,442 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41265 connecting to ZooKeeper ensemble=127.0.0.1:56983 2024-11-11T07:01:09,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:412650x0, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T07:01:09,448 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41265-0x10105d4a73e0000 connected 2024-11-11T07:01:09,463 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:01:09,465 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:01:09,467 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:01:09,467 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634, hbase.cluster.distributed=false 2024-11-11T07:01:09,469 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T07:01:09,470 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41265 2024-11-11T07:01:09,470 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41265 2024-11-11T07:01:09,470 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41265 2024-11-11T07:01:09,470 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41265 2024-11-11T07:01:09,471 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41265 2024-11-11T07:01:09,486 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f334e8b0fbdf:0 server-side Connection retries=45 2024-11-11T07:01:09,486 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:01:09,486 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T07:01:09,486 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T07:01:09,486 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:01:09,486 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T07:01:09,486 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T07:01:09,486 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T07:01:09,487 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37089 2024-11-11T07:01:09,488 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37089 connecting to ZooKeeper ensemble=127.0.0.1:56983 2024-11-11T07:01:09,489 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:01:09,491 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:01:09,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:370890x0, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T07:01:09,495 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37089-0x10105d4a73e0001, quorum=127.0.0.1:56983, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:01:09,495 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37089-0x10105d4a73e0001 connected 2024-11-11T07:01:09,496 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T07:01:09,496 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T07:01:09,497 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37089-0x10105d4a73e0001, quorum=127.0.0.1:56983, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T07:01:09,498 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37089-0x10105d4a73e0001, quorum=127.0.0.1:56983, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T07:01:09,498 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37089 2024-11-11T07:01:09,500 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37089 2024-11-11T07:01:09,504 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37089 2024-11-11T07:01:09,505 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37089 2024-11-11T07:01:09,505 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37089 2024-11-11T07:01:09,517 DEBUG [M:0;f334e8b0fbdf:41265 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;f334e8b0fbdf:41265 2024-11-11T07:01:09,517 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/f334e8b0fbdf,41265,1731308469439 2024-11-11T07:01:09,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T07:01:09,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37089-0x10105d4a73e0001, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T07:01:09,520 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/f334e8b0fbdf,41265,1731308469439 2024-11-11T07:01:09,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:09,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37089-0x10105d4a73e0001, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T07:01:09,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37089-0x10105d4a73e0001, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:09,522 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T07:01:09,522 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/f334e8b0fbdf,41265,1731308469439 from backup master directory 2024-11-11T07:01:09,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/f334e8b0fbdf,41265,1731308469439 2024-11-11T07:01:09,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T07:01:09,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37089-0x10105d4a73e0001, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T07:01:09,525 WARN [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-11T07:01:09,525 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=f334e8b0fbdf,41265,1731308469439 2024-11-11T07:01:09,529 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/hbase.id] with ID: 1e00b434-7822-4056-8fde-ffb82d290b25 2024-11-11T07:01:09,529 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/.tmp/hbase.id 2024-11-11T07:01:09,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45161 is added to blk_1073741826_1002 (size=42) 2024-11-11T07:01:09,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34231 is added to blk_1073741826_1002 (size=42) 2024-11-11T07:01:09,537 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/.tmp/hbase.id]:[hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/hbase.id] 2024-11-11T07:01:09,550 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:01:09,550 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T07:01:09,551 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-11T07:01:09,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37089-0x10105d4a73e0001, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:09,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:09,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45161 is added to blk_1073741827_1003 (size=196) 2024-11-11T07:01:09,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34231 is added to blk_1073741827_1003 (size=196) 2024-11-11T07:01:09,563 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T07:01:09,563 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T07:01:09,564 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T07:01:09,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45161 is added to blk_1073741828_1004 (size=1189) 2024-11-11T07:01:09,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34231 is added to blk_1073741828_1004 (size=1189) 2024-11-11T07:01:09,574 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store 2024-11-11T07:01:09,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34231 is added to blk_1073741829_1005 (size=34) 2024-11-11T07:01:09,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45161 is added to blk_1073741829_1005 (size=34) 2024-11-11T07:01:09,582 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:01:09,583 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T07:01:09,583 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:01:09,583 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:01:09,583 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T07:01:09,583 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:01:09,583 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-11T07:01:09,583 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731308469583Disabling compacts and flushes for region at 1731308469583Disabling writes for close at 1731308469583Writing region close event to WAL at 1731308469583Closed at 1731308469583 2024-11-11T07:01:09,584 WARN [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store/.initializing 2024-11-11T07:01:09,584 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/WALs/f334e8b0fbdf,41265,1731308469439 2024-11-11T07:01:09,586 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f334e8b0fbdf%2C41265%2C1731308469439, suffix=, logDir=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/WALs/f334e8b0fbdf,41265,1731308469439, archiveDir=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/oldWALs, maxLogs=10 2024-11-11T07:01:09,586 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C41265%2C1731308469439.1731308469586 2024-11-11T07:01:09,591 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/WALs/f334e8b0fbdf,41265,1731308469439/f334e8b0fbdf%2C41265%2C1731308469439.1731308469586 2024-11-11T07:01:09,592 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32895:32895),(127.0.0.1/127.0.0.1:33209:33209)] 2024-11-11T07:01:09,593 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T07:01:09,593 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:01:09,593 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:01:09,593 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:01:09,594 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:01:09,595 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T07:01:09,596 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:09,596 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:01:09,596 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:01:09,597 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T07:01:09,597 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:09,598 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T07:01:09,598 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:01:09,599 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T07:01:09,599 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:09,599 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T07:01:09,599 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:01:09,600 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T07:01:09,600 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:09,601 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T07:01:09,601 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:01:09,601 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:01:09,602 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:01:09,603 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:01:09,603 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:01:09,603 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T07:01:09,604 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:01:09,606 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T07:01:09,607 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=760770, jitterRate=-0.032631561160087585}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T07:01:09,607 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731308469593Initializing all the Stores at 1731308469594 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308469594Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308469594Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308469594Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308469594Cleaning up temporary data from old regions at 1731308469603 (+9 ms)Region opened successfully at 1731308469607 (+4 ms) 2024-11-11T07:01:09,607 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T07:01:09,610 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@606da7f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f334e8b0fbdf/172.17.0.2:0 2024-11-11T07:01:09,611 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T07:01:09,611 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T07:01:09,611 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T07:01:09,611 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T07:01:09,612 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T07:01:09,612 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T07:01:09,612 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T07:01:09,614 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T07:01:09,615 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T07:01:09,616 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T07:01:09,616 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T07:01:09,617 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T07:01:09,618 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T07:01:09,618 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T07:01:09,619 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T07:01:09,623 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T07:01:09,624 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T07:01:09,626 DEBUG 
[master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T07:01:09,628 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T07:01:09,630 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T07:01:09,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37089-0x10105d4a73e0001, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T07:01:09,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T07:01:09,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37089-0x10105d4a73e0001, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:09,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:09,632 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=f334e8b0fbdf,41265,1731308469439, sessionid=0x10105d4a73e0000, setting cluster-up flag (Was=false) 2024-11-11T07:01:09,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37089-0x10105d4a73e0001, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:09,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:09,644 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T07:01:09,646 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f334e8b0fbdf,41265,1731308469439 2024-11-11T07:01:09,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37089-0x10105d4a73e0001, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:09,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:09,655 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T07:01:09,656 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f334e8b0fbdf,41265,1731308469439 2024-11-11T07:01:09,658 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T07:01:09,660 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T07:01:09,660 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T07:01:09,660 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T07:01:09,660 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: f334e8b0fbdf,41265,1731308469439 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T07:01:09,661 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T07:01:09,662 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T07:01:09,662 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T07:01:09,662 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T07:01:09,662 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/f334e8b0fbdf:0, corePoolSize=10, maxPoolSize=10 2024-11-11T07:01:09,662 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:01:09,662 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=2, maxPoolSize=2 2024-11-11T07:01:09,662 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T07:01:09,663 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731308499663 2024-11-11T07:01:09,663 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T07:01:09,663 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T07:01:09,663 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T07:01:09,663 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T07:01:09,663 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T07:01:09,663 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T07:01:09,664 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:09,664 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T07:01:09,664 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T07:01:09,664 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T07:01:09,664 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T07:01:09,664 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T07:01:09,665 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:09,665 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T07:01:09,667 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T07:01:09,667 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T07:01:09,667 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.large.0-1731308469667,5,FailOnTimeoutGroup] 2024-11-11T07:01:09,667 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.small.0-1731308469667,5,FailOnTimeoutGroup] 2024-11-11T07:01:09,668 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:09,668 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T07:01:09,668 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:09,668 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
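
The FSTableDescriptors output above is the bootstrap descriptor HBase writes for hbase:meta. For orientation only, here is a minimal Java sketch of declaring a user table whose 'info' family carries the same attributes (3 versions, ROWCOL bloom filter, in-memory, ROW_INDEX_V1 encoding, 8 KB blocks). The table name "demo" and the wrapper class are invented for the example; the builder calls are the standard HBase client API.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
      public static void main(String[] args) {
        // Mirror the family attributes logged above for hbase:meta's 'info' family.
        TableDescriptor demo = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
                .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
                .build())
            .build();
        System.out.println(demo);
      }
    }
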
2024-11-11T07:01:09,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45161 is added to blk_1073741831_1007 (size=1321) 2024-11-11T07:01:09,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34231 is added to blk_1073741831_1007 (size=1321) 2024-11-11T07:01:09,678 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T07:01:09,678 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634 2024-11-11T07:01:09,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45161 is added to blk_1073741832_1008 (size=32) 2024-11-11T07:01:09,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34231 is added to blk_1073741832_1008 (size=32) 2024-11-11T07:01:09,689 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:01:09,690 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T07:01:09,691 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T07:01:09,691 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:09,692 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:01:09,692 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T07:01:09,693 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T07:01:09,693 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:09,694 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:01:09,694 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T07:01:09,695 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T07:01:09,695 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:09,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:01:09,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T07:01:09,697 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T07:01:09,697 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:09,697 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:01:09,697 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T07:01:09,698 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/hbase/meta/1588230740 2024-11-11T07:01:09,698 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/hbase/meta/1588230740 2024-11-11T07:01:09,700 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T07:01:09,700 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T07:01:09,700 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
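
The CompactionConfiguration lines repeated above for each column family are driven by a handful of store-compaction settings. Below is a hedged sketch that sets the logged values explicitly; the key names are the commonly documented hbase-default.xml keys for this code line, but they should be confirmed against the exact release before relying on them.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);             // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);            // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);      // ratio 1.200000
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio
        conf.setLong("hbase.hregion.majorcompaction", 604800000L); // major period (7 days)
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);   // major jitter
        System.out.println(conf.get("hbase.hstore.compaction.ratio"));
      }
    }
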
2024-11-11T07:01:09,701 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T07:01:09,703 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T07:01:09,704 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=720780, jitterRate=-0.08348162472248077}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T07:01:09,704 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731308469689Initializing all the Stores at 1731308469690 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308469690Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308469690Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308469690Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308469690Cleaning up temporary data from old regions at 1731308469700 (+10 ms)Region opened successfully at 1731308469704 (+4 ms) 2024-11-11T07:01:09,704 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T07:01:09,705 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T07:01:09,705 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T07:01:09,705 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T07:01:09,705 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T07:01:09,705 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T07:01:09,705 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731308469704Disabling compacts and flushes for region at 1731308469704Disabling writes for close at 1731308469705 (+1 ms)Writing 
region close event to WAL at 1731308469705Closed at 1731308469705 2024-11-11T07:01:09,706 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T07:01:09,706 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T07:01:09,707 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T07:01:09,707 INFO [RS:0;f334e8b0fbdf:37089 {}] regionserver.HRegionServer(746): ClusterId : 1e00b434-7822-4056-8fde-ffb82d290b25 2024-11-11T07:01:09,707 DEBUG [RS:0;f334e8b0fbdf:37089 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T07:01:09,708 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T07:01:09,709 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T07:01:09,709 DEBUG [RS:0;f334e8b0fbdf:37089 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T07:01:09,709 DEBUG [RS:0;f334e8b0fbdf:37089 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T07:01:09,712 DEBUG [RS:0;f334e8b0fbdf:37089 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T07:01:09,712 DEBUG [RS:0;f334e8b0fbdf:37089 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6852f4c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f334e8b0fbdf/172.17.0.2:0 2024-11-11T07:01:09,730 DEBUG [RS:0;f334e8b0fbdf:37089 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;f334e8b0fbdf:37089 2024-11-11T07:01:09,730 INFO [RS:0;f334e8b0fbdf:37089 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T07:01:09,730 INFO [RS:0;f334e8b0fbdf:37089 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T07:01:09,730 DEBUG [RS:0;f334e8b0fbdf:37089 {}] regionserver.HRegionServer(832): About to register with Master. 
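
At this point InitMetaProcedure has spawned the assignment of hbase:meta while the region server finishes wiring up its procedure members. Once the assignment completes, the placement is visible through the public Admin API; a small client sketch follows (connection settings are assumed to come from an hbase-site.xml on the classpath).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class MetaPlacementSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // List the region(s) of hbase:meta once TransitRegionStateProcedure has assigned them.
          for (RegionInfo region : admin.getRegions(TableName.META_TABLE_NAME)) {
            System.out.println(region.getRegionNameAsString());
          }
        }
      }
    }
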
2024-11-11T07:01:09,731 INFO [RS:0;f334e8b0fbdf:37089 {}] regionserver.HRegionServer(2659): reportForDuty to master=f334e8b0fbdf,41265,1731308469439 with port=37089, startcode=1731308469485 2024-11-11T07:01:09,731 DEBUG [RS:0;f334e8b0fbdf:37089 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T07:01:09,733 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43517, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T07:01:09,734 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41265 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f334e8b0fbdf,37089,1731308469485 2024-11-11T07:01:09,734 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41265 {}] master.ServerManager(517): Registering regionserver=f334e8b0fbdf,37089,1731308469485 2024-11-11T07:01:09,735 DEBUG [RS:0;f334e8b0fbdf:37089 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634 2024-11-11T07:01:09,736 DEBUG [RS:0;f334e8b0fbdf:37089 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41569 2024-11-11T07:01:09,736 DEBUG [RS:0;f334e8b0fbdf:37089 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T07:01:09,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T07:01:09,739 DEBUG [RS:0;f334e8b0fbdf:37089 {}] zookeeper.ZKUtil(111): regionserver:37089-0x10105d4a73e0001, quorum=127.0.0.1:56983, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f334e8b0fbdf,37089,1731308469485 2024-11-11T07:01:09,739 WARN [RS:0;f334e8b0fbdf:37089 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T07:01:09,739 INFO [RS:0;f334e8b0fbdf:37089 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T07:01:09,739 DEBUG [RS:0;f334e8b0fbdf:37089 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485 2024-11-11T07:01:09,739 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f334e8b0fbdf,37089,1731308469485] 2024-11-11T07:01:09,743 INFO [RS:0;f334e8b0fbdf:37089 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T07:01:09,744 INFO [RS:0;f334e8b0fbdf:37089 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T07:01:09,744 INFO [RS:0;f334e8b0fbdf:37089 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T07:01:09,744 INFO [RS:0;f334e8b0fbdf:37089 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
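
The report-for-duty block above also fixes the region server's WAL provider (FSHLogProvider), global memstore limits (880 M with an 836 M low mark) and compaction throughput bounds (100 MB/s upper, 50 MB/s lower). A hedged sketch of the corresponding configuration keys follows; the names are believed current for the 2.x/3.x line but should be checked against hbase-default.xml for this build.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegionServerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "filesystem");                    // FSHLogProvider
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);  // heap fraction -> ~880 M here
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f); // low-water mark
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
      }
    }
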
2024-11-11T07:01:09,745 INFO [RS:0;f334e8b0fbdf:37089 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T07:01:09,745 INFO [RS:0;f334e8b0fbdf:37089 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T07:01:09,745 INFO [RS:0;f334e8b0fbdf:37089 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:09,746 DEBUG [RS:0;f334e8b0fbdf:37089 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:01:09,746 DEBUG [RS:0;f334e8b0fbdf:37089 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:01:09,746 DEBUG [RS:0;f334e8b0fbdf:37089 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:01:09,746 DEBUG [RS:0;f334e8b0fbdf:37089 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:01:09,746 DEBUG [RS:0;f334e8b0fbdf:37089 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:01:09,746 DEBUG [RS:0;f334e8b0fbdf:37089 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f334e8b0fbdf:0, corePoolSize=2, maxPoolSize=2 2024-11-11T07:01:09,746 DEBUG [RS:0;f334e8b0fbdf:37089 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:01:09,746 DEBUG [RS:0;f334e8b0fbdf:37089 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:01:09,746 DEBUG [RS:0;f334e8b0fbdf:37089 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:01:09,746 DEBUG [RS:0;f334e8b0fbdf:37089 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:01:09,746 DEBUG [RS:0;f334e8b0fbdf:37089 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:01:09,746 DEBUG [RS:0;f334e8b0fbdf:37089 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:01:09,746 DEBUG [RS:0;f334e8b0fbdf:37089 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f334e8b0fbdf:0, corePoolSize=3, maxPoolSize=3 2024-11-11T07:01:09,746 DEBUG [RS:0;f334e8b0fbdf:37089 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0, corePoolSize=3, maxPoolSize=3 2024-11-11T07:01:09,746 INFO [RS:0;f334e8b0fbdf:37089 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
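
The executor services and ScheduledChore entries above are the region server's fixed-size thread pools and periodic maintenance tasks. As a rough, framework-free analogue of a chore (plain JDK scheduling, not HBase's internal ChoreService API):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreAnalogueSketch {
      public static void main(String[] args) {
        ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
        // Stand-in for a chore body such as CompactionChecker, on its logged 1000 ms period.
        pool.scheduleAtFixedRate(() -> System.out.println("chore tick"),
            0, 1000, TimeUnit.MILLISECONDS);
      }
    }
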
2024-11-11T07:01:09,747 INFO [RS:0;f334e8b0fbdf:37089 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:09,747 INFO [RS:0;f334e8b0fbdf:37089 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:09,747 INFO [RS:0;f334e8b0fbdf:37089 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:09,747 INFO [RS:0;f334e8b0fbdf:37089 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:09,747 INFO [RS:0;f334e8b0fbdf:37089 {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,37089,1731308469485-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T07:01:09,762 INFO [RS:0;f334e8b0fbdf:37089 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T07:01:09,762 INFO [RS:0;f334e8b0fbdf:37089 {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,37089,1731308469485-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:09,762 INFO [RS:0;f334e8b0fbdf:37089 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:09,762 INFO [RS:0;f334e8b0fbdf:37089 {}] regionserver.Replication(171): f334e8b0fbdf,37089,1731308469485 started 2024-11-11T07:01:09,775 INFO [RS:0;f334e8b0fbdf:37089 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:09,775 INFO [RS:0;f334e8b0fbdf:37089 {}] regionserver.HRegionServer(1482): Serving as f334e8b0fbdf,37089,1731308469485, RpcServer on f334e8b0fbdf/172.17.0.2:37089, sessionid=0x10105d4a73e0001 2024-11-11T07:01:09,775 DEBUG [RS:0;f334e8b0fbdf:37089 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T07:01:09,776 DEBUG [RS:0;f334e8b0fbdf:37089 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f334e8b0fbdf,37089,1731308469485 2024-11-11T07:01:09,776 DEBUG [RS:0;f334e8b0fbdf:37089 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f334e8b0fbdf,37089,1731308469485' 2024-11-11T07:01:09,776 DEBUG [RS:0;f334e8b0fbdf:37089 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T07:01:09,776 DEBUG [RS:0;f334e8b0fbdf:37089 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T07:01:09,777 DEBUG [RS:0;f334e8b0fbdf:37089 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T07:01:09,777 DEBUG [RS:0;f334e8b0fbdf:37089 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T07:01:09,777 DEBUG [RS:0;f334e8b0fbdf:37089 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager f334e8b0fbdf,37089,1731308469485 2024-11-11T07:01:09,777 DEBUG [RS:0;f334e8b0fbdf:37089 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f334e8b0fbdf,37089,1731308469485' 2024-11-11T07:01:09,777 DEBUG [RS:0;f334e8b0fbdf:37089 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T07:01:09,777 DEBUG 
[RS:0;f334e8b0fbdf:37089 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T07:01:09,777 DEBUG [RS:0;f334e8b0fbdf:37089 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T07:01:09,777 INFO [RS:0;f334e8b0fbdf:37089 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T07:01:09,777 INFO [RS:0;f334e8b0fbdf:37089 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T07:01:09,860 WARN [f334e8b0fbdf:41265 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-11T07:01:09,879 INFO [RS:0;f334e8b0fbdf:37089 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f334e8b0fbdf%2C37089%2C1731308469485, suffix=, logDir=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485, archiveDir=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/oldWALs, maxLogs=32 2024-11-11T07:01:09,880 INFO [RS:0;f334e8b0fbdf:37089 {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C37089%2C1731308469485.1731308469880 2024-11-11T07:01:09,886 INFO [RS:0;f334e8b0fbdf:37089 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308469880 2024-11-11T07:01:09,887 DEBUG [RS:0;f334e8b0fbdf:37089 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33209:33209),(127.0.0.1/127.0.0.1:32895:32895)] 2024-11-11T07:01:10,110 DEBUG [f334e8b0fbdf:41265 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T07:01:10,110 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=f334e8b0fbdf,37089,1731308469485 2024-11-11T07:01:10,112 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f334e8b0fbdf,37089,1731308469485, state=OPENING 2024-11-11T07:01:10,113 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T07:01:10,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37089-0x10105d4a73e0001, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:10,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:10,115 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T07:01:10,115 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T07:01:10,115 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T07:01:10,115 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=f334e8b0fbdf,37089,1731308469485}] 2024-11-11T07:01:10,126 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:10,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:10,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:10,268 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T07:01:10,271 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36479, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T07:01:10,274 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T07:01:10,274 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T07:01:10,276 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f334e8b0fbdf%2C37089%2C1731308469485.meta, suffix=.meta, logDir=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485, archiveDir=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/oldWALs, maxLogs=32 2024-11-11T07:01:10,276 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C37089%2C1731308469485.meta.1731308470276.meta 2024-11-11T07:01:10,281 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.meta.1731308470276.meta 2024-11-11T07:01:10,282 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33209:33209),(127.0.0.1/127.0.0.1:32895:32895)] 2024-11-11T07:01:10,283 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T07:01:10,283 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T07:01:10,283 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T07:01:10,283 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
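
The three WARN stack traces above come from WAL close against an earlier mini-cluster's files (note the different NameNode port, localhost:41533): RecoverLeaseFSUtils reflectively calls DFSClient.isFileClosed, which fails with "Filesystem closed" because that DFS client has already been shut down, so in this test context the warnings appear benign. For reference, the same lease-recovery idea expressed against the public HDFS API, not HBase's actual helper; the path below is a placeholder.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path wal = new Path("/user/jenkins/example-wal");   // placeholder file, not a real WAL
        try (FileSystem fs = FileSystem.get(conf)) {
          if (fs instanceof DistributedFileSystem) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            // Ask the NameNode to recover the lease, then poll until the file is reported closed.
            boolean recovered = dfs.recoverLease(wal);
            while (!recovered && !dfs.isFileClosed(wal)) {
              Thread.sleep(1000);
              recovered = dfs.recoverLease(wal);
            }
          }
        }
      }
    }
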
2024-11-11T07:01:10,283 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T07:01:10,283 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:01:10,284 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T07:01:10,284 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T07:01:10,285 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T07:01:10,286 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T07:01:10,286 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:10,286 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:01:10,286 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T07:01:10,287 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T07:01:10,287 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:10,287 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:01:10,287 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T07:01:10,288 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T07:01:10,288 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:10,288 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:01:10,288 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T07:01:10,289 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T07:01:10,289 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:10,289 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
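
The cacheConfig lines above list per-family block-cache flags (cache on read, no cache-on-write, no prefetch on open). For a user table the same flags are exposed as ColumnFamilyDescriptorBuilder setters; a short sketch, reusing 'info' purely as an example family name:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CacheFlagsSketch {
      public static void main(String[] args) {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setBlockCacheEnabled(true)       // cacheDataOnRead=true
            .setCacheDataOnWrite(false)
            .setCacheIndexesOnWrite(false)
            .setCacheBloomsOnWrite(false)
            .setEvictBlocksOnClose(false)
            .setPrefetchBlocksOnOpen(false)
            .build();
        System.out.println(cf);
      }
    }
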
2024-11-11T07:01:10,290 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T07:01:10,290 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/hbase/meta/1588230740 2024-11-11T07:01:10,291 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/hbase/meta/1588230740 2024-11-11T07:01:10,292 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T07:01:10,292 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T07:01:10,293 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T07:01:10,294 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T07:01:10,295 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=778781, jitterRate=-0.009729385375976562}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T07:01:10,295 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T07:01:10,296 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731308470284Writing region info on filesystem at 1731308470284Initializing all the Stores at 1731308470284Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308470284Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308470285 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308470285Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308470285Cleaning up temporary data from old regions at 1731308470292 (+7 ms)Running coprocessor post-open hooks at 1731308470295 (+3 ms)Region opened successfully at 1731308470296 (+1 ms) 2024-11-11T07:01:10,297 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731308470268 2024-11-11T07:01:10,299 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T07:01:10,299 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T07:01:10,300 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=f334e8b0fbdf,37089,1731308469485 2024-11-11T07:01:10,301 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f334e8b0fbdf,37089,1731308469485, state=OPEN 2024-11-11T07:01:10,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T07:01:10,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37089-0x10105d4a73e0001, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T07:01:10,307 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=f334e8b0fbdf,37089,1731308469485 2024-11-11T07:01:10,307 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T07:01:10,307 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T07:01:10,310 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T07:01:10,310 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=f334e8b0fbdf,37089,1731308469485 in 192 msec 2024-11-11T07:01:10,312 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T07:01:10,312 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 603 msec 2024-11-11T07:01:10,313 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T07:01:10,313 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T07:01:10,314 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T07:01:10,314 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f334e8b0fbdf,37089,1731308469485, seqNum=-1] 2024-11-11T07:01:10,314 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T07:01:10,316 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46531, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T07:01:10,321 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 661 msec 2024-11-11T07:01:10,321 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731308470321, completionTime=-1 2024-11-11T07:01:10,321 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T07:01:10,321 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-11T07:01:10,323 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-11T07:01:10,323 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731308530323 2024-11-11T07:01:10,323 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731308590323 2024-11-11T07:01:10,323 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-11T07:01:10,323 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,41265,1731308469439-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:10,323 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,41265,1731308469439-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:10,323 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,41265,1731308469439-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:10,323 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-f334e8b0fbdf:41265, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T07:01:10,323 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:10,324 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:10,325 DEBUG [master/f334e8b0fbdf:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T07:01:10,327 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.802sec 2024-11-11T07:01:10,327 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T07:01:10,327 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T07:01:10,327 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T07:01:10,327 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T07:01:10,327 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T07:01:10,327 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,41265,1731308469439-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T07:01:10,327 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,41265,1731308469439-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T07:01:10,330 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T07:01:10,330 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T07:01:10,330 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,41265,1731308469439-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T07:01:10,407 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68c19d09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T07:01:10,407 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request f334e8b0fbdf,41265,-1 for getting cluster id 2024-11-11T07:01:10,407 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T07:01:10,409 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '1e00b434-7822-4056-8fde-ffb82d290b25' 2024-11-11T07:01:10,409 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T07:01:10,409 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "1e00b434-7822-4056-8fde-ffb82d290b25" 2024-11-11T07:01:10,409 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3153ac40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T07:01:10,409 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f334e8b0fbdf,41265,-1] 2024-11-11T07:01:10,410 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T07:01:10,410 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:01:10,411 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50996, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T07:01:10,412 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6806ec7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T07:01:10,413 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T07:01:10,413 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f334e8b0fbdf,37089,1731308469485, seqNum=-1] 2024-11-11T07:01:10,414 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T07:01:10,415 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44898, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T07:01:10,417 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=f334e8b0fbdf,41265,1731308469439 2024-11-11T07:01:10,417 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:01:10,419 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T07:01:10,420 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-11T07:01:10,420 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-11T07:01:10,420 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T07:01:10,421 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is f334e8b0fbdf,41265,1731308469439 2024-11-11T07:01:10,421 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@37fdcc12 2024-11-11T07:01:10,421 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T07:01:10,422 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51000, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T07:01:10,423 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41265 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-11T07:01:10,423 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41265 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-11T07:01:10,423 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41265 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T07:01:10,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41265 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-11T07:01:10,425 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T07:01:10,426 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:10,426 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41265 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-11T07:01:10,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41265 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T07:01:10,427 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T07:01:10,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34231 is added to blk_1073741835_1011 (size=395) 2024-11-11T07:01:10,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45161 is added to blk_1073741835_1011 (size=395) 2024-11-11T07:01:10,435 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 9023fa18b668ef8af88ffa7953d2b788, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731308470422.9023fa18b668ef8af88ffa7953d2b788.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634 2024-11-11T07:01:10,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45161 is added to blk_1073741836_1012 (size=78) 2024-11-11T07:01:10,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34231 is added to blk_1073741836_1012 (size=78) 2024-11-11T07:01:10,441 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731308470422.9023fa18b668ef8af88ffa7953d2b788.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:01:10,441 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 9023fa18b668ef8af88ffa7953d2b788, disabling compactions & flushes 2024-11-11T07:01:10,441 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731308470422.9023fa18b668ef8af88ffa7953d2b788. 2024-11-11T07:01:10,441 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731308470422.9023fa18b668ef8af88ffa7953d2b788. 2024-11-11T07:01:10,441 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731308470422.9023fa18b668ef8af88ffa7953d2b788. after waiting 0 ms 2024-11-11T07:01:10,441 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731308470422.9023fa18b668ef8af88ffa7953d2b788. 2024-11-11T07:01:10,441 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731308470422.9023fa18b668ef8af88ffa7953d2b788. 2024-11-11T07:01:10,441 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 9023fa18b668ef8af88ffa7953d2b788: Waiting for close lock at 1731308470441Disabling compacts and flushes for region at 1731308470441Disabling writes for close at 1731308470441Writing region close event to WAL at 1731308470441Closed at 1731308470441 2024-11-11T07:01:10,443 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T07:01:10,443 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731308470422.9023fa18b668ef8af88ffa7953d2b788.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731308470443"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731308470443"}]},"ts":"1731308470443"} 2024-11-11T07:01:10,445 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-11T07:01:10,446 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T07:01:10,447 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731308470446"}]},"ts":"1731308470446"} 2024-11-11T07:01:10,448 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-11T07:01:10,449 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=9023fa18b668ef8af88ffa7953d2b788, ASSIGN}] 2024-11-11T07:01:10,450 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=9023fa18b668ef8af88ffa7953d2b788, ASSIGN 2024-11-11T07:01:10,451 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=9023fa18b668ef8af88ffa7953d2b788, ASSIGN; state=OFFLINE, location=f334e8b0fbdf,37089,1731308469485; forceNewPlan=false, retain=false 2024-11-11T07:01:10,601 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9023fa18b668ef8af88ffa7953d2b788, regionState=OPENING, regionLocation=f334e8b0fbdf,37089,1731308469485 2024-11-11T07:01:10,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=9023fa18b668ef8af88ffa7953d2b788, ASSIGN because future has completed 2024-11-11T07:01:10,604 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9023fa18b668ef8af88ffa7953d2b788, server=f334e8b0fbdf,37089,1731308469485}] 2024-11-11T07:01:10,761 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731308470422.9023fa18b668ef8af88ffa7953d2b788. 
2024-11-11T07:01:10,761 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 9023fa18b668ef8af88ffa7953d2b788, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731308470422.9023fa18b668ef8af88ffa7953d2b788.', STARTKEY => '', ENDKEY => ''} 2024-11-11T07:01:10,762 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 9023fa18b668ef8af88ffa7953d2b788 2024-11-11T07:01:10,762 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731308470422.9023fa18b668ef8af88ffa7953d2b788.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:01:10,762 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 9023fa18b668ef8af88ffa7953d2b788 2024-11-11T07:01:10,762 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 9023fa18b668ef8af88ffa7953d2b788 2024-11-11T07:01:10,763 INFO [StoreOpener-9023fa18b668ef8af88ffa7953d2b788-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 9023fa18b668ef8af88ffa7953d2b788 2024-11-11T07:01:10,764 INFO [StoreOpener-9023fa18b668ef8af88ffa7953d2b788-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9023fa18b668ef8af88ffa7953d2b788 columnFamilyName info 2024-11-11T07:01:10,764 DEBUG [StoreOpener-9023fa18b668ef8af88ffa7953d2b788-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:10,765 INFO [StoreOpener-9023fa18b668ef8af88ffa7953d2b788-1 {}] regionserver.HStore(327): Store=9023fa18b668ef8af88ffa7953d2b788/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T07:01:10,765 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 9023fa18b668ef8af88ffa7953d2b788 2024-11-11T07:01:10,766 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/default/TestLogRolling-testLogRollOnPipelineRestart/9023fa18b668ef8af88ffa7953d2b788 2024-11-11T07:01:10,766 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/default/TestLogRolling-testLogRollOnPipelineRestart/9023fa18b668ef8af88ffa7953d2b788 2024-11-11T07:01:10,766 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 9023fa18b668ef8af88ffa7953d2b788 2024-11-11T07:01:10,766 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 9023fa18b668ef8af88ffa7953d2b788 2024-11-11T07:01:10,768 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 9023fa18b668ef8af88ffa7953d2b788 2024-11-11T07:01:10,770 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/default/TestLogRolling-testLogRollOnPipelineRestart/9023fa18b668ef8af88ffa7953d2b788/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T07:01:10,770 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 9023fa18b668ef8af88ffa7953d2b788; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=802347, jitterRate=0.020237714052200317}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T07:01:10,771 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9023fa18b668ef8af88ffa7953d2b788 2024-11-11T07:01:10,771 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 9023fa18b668ef8af88ffa7953d2b788: Running coprocessor pre-open hook at 1731308470762Writing region info on filesystem at 1731308470762Initializing all the Stores at 1731308470763 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308470763Cleaning up temporary data from old regions at 1731308470766 (+3 ms)Running coprocessor post-open hooks at 1731308470771 (+5 ms)Region opened successfully at 1731308470771 2024-11-11T07:01:10,773 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731308470422.9023fa18b668ef8af88ffa7953d2b788., pid=6, masterSystemTime=1731308470757 2024-11-11T07:01:10,775 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1731308470422.9023fa18b668ef8af88ffa7953d2b788. 2024-11-11T07:01:10,775 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731308470422.9023fa18b668ef8af88ffa7953d2b788. 2024-11-11T07:01:10,776 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9023fa18b668ef8af88ffa7953d2b788, regionState=OPEN, openSeqNum=2, regionLocation=f334e8b0fbdf,37089,1731308469485 2024-11-11T07:01:10,778 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9023fa18b668ef8af88ffa7953d2b788, server=f334e8b0fbdf,37089,1731308469485 because future has completed 2024-11-11T07:01:10,782 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T07:01:10,782 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 9023fa18b668ef8af88ffa7953d2b788, server=f334e8b0fbdf,37089,1731308469485 in 176 msec 2024-11-11T07:01:10,785 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T07:01:10,785 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=9023fa18b668ef8af88ffa7953d2b788, ASSIGN in 333 msec 2024-11-11T07:01:10,786 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T07:01:10,786 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731308470786"}]},"ts":"1731308470786"} 2024-11-11T07:01:10,788 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-11T07:01:10,789 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T07:01:10,791 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 366 msec 2024-11-11T07:01:11,126 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:11,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:11,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:11,605 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-11T07:01:11,605 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-11T07:01:11,606 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-11T07:01:11,606 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-11T07:01:11,606 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T07:01:11,607 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-11T07:01:12,127 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:12,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:12,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:13,128 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:13,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:13,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:14,128 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:14,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:14,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:15,129 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:15,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:15,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:15,294 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:15,294 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:15,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:15,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:15,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:15,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:15,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:15,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:15,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:15,300 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:15,804 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T07:01:15,829 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:15,830 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:15,830 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:15,831 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:15,831 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:15,832 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:15,837 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:15,837 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:15,837 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:15,839 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:15,844 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-11T07:01:15,844 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-11T07:01:16,129 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:16,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:16,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:17,130 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:17,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:17,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:18,131 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:18,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:18,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:19,131 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:19,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:19,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:20,132 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:20,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:20,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:20,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41265 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T07:01:20,506 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-11T07:01:20,506 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-11T07:01:20,508 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-11T07:01:20,509 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731308470422.9023fa18b668ef8af88ffa7953d2b788. 2024-11-11T07:01:20,512 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731308470422.9023fa18b668ef8af88ffa7953d2b788., hostname=f334e8b0fbdf,37089,1731308469485, seqNum=2] 2024-11-11T07:01:21,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:21,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:21,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:22,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:22,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:22,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:22,514 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308469880 2024-11-11T07:01:22,515 WARN [ResponseProcessor for block BP-880503148-172.17.0.2-1731308468769:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-880503148-172.17.0.2-1731308468769:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:01:22,515 WARN [ResponseProcessor for block BP-880503148-172.17.0.2-1731308468769:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-880503148-172.17.0.2-1731308468769:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
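The repeated WARN stanzas above all have the same shape: a java.lang.reflect.InvocationTargetException whose cause is java.io.IOException: Filesystem closed. That is simply how Method.invoke reports a failure in the target method: the real exception thrown by DistributedFileSystem.isFileClosed is wrapped, and the caller has to unwrap it via getCause(). A minimal, self-contained sketch of that reflective call-and-unwrap pattern, assuming only a DistributedFileSystem handle and a Path; the class name and layout here are illustrative and do not reproduce the RecoverLeaseFSUtils source:

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class ReflectiveIsFileClosedSketch {
      // Calls DistributedFileSystem.isFileClosed(Path) reflectively, the way client
      // code can probe for the method without a hard compile-time dependency.
      static boolean isFileClosed(DistributedFileSystem dfs, Path wal) throws IOException {
        try {
          Method m = DistributedFileSystem.class.getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(dfs, wal);
        } catch (InvocationTargetException e) {
          // The target method threw; the real error (e.g. "Filesystem closed") is the cause.
          Throwable cause = e.getCause();
          if (cause instanceof IOException) {
            throw (IOException) cause;
          }
          throw new IOException("isFileClosed failed", cause);
        } catch (ReflectiveOperationException e) {
          // Method missing or inaccessible on this Hadoop version.
          throw new IOException("isFileClosed not available", e);
        }
      }
    }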
2024-11-11T07:01:22,515 WARN [ResponseProcessor for block BP-880503148-172.17.0.2-1731308468769:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-880503148-172.17.0.2-1731308468769:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-880503148-172.17.0.2-1731308468769:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:34231,DS-cc5f5bb1-4040-439b-aebf-559025392dc9,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:01:22,515 WARN [DataStreamer for file /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308469880 block BP-880503148-172.17.0.2-1731308468769:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-880503148-172.17.0.2-1731308468769:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34231,DS-cc5f5bb1-4040-439b-aebf-559025392dc9,DISK], DatanodeInfoWithStorage[127.0.0.1:45161,DS-5f2f2062-f0de-4847-951f-d3ab6291782d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34231,DS-cc5f5bb1-4040-439b-aebf-559025392dc9,DISK]) is bad. 2024-11-11T07:01:22,515 WARN [DataStreamer for file /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.meta.1731308470276.meta block BP-880503148-172.17.0.2-1731308468769:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-880503148-172.17.0.2-1731308468769:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34231,DS-cc5f5bb1-4040-439b-aebf-559025392dc9,DISK], DatanodeInfoWithStorage[127.0.0.1:45161,DS-5f2f2062-f0de-4847-951f-d3ab6291782d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34231,DS-cc5f5bb1-4040-439b-aebf-559025392dc9,DISK]) is bad. 2024-11-11T07:01:22,516 WARN [DataStreamer for file /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/WALs/f334e8b0fbdf,41265,1731308469439/f334e8b0fbdf%2C41265%2C1731308469439.1731308469586 block BP-880503148-172.17.0.2-1731308468769:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-880503148-172.17.0.2-1731308468769:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45161,DS-5f2f2062-f0de-4847-951f-d3ab6291782d,DISK], DatanodeInfoWithStorage[127.0.0.1:34231,DS-cc5f5bb1-4040-439b-aebf-559025392dc9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34231,DS-cc5f5bb1-4040-439b-aebf-559025392dc9,DISK]) is bad. 2024-11-11T07:01:22,516 WARN [PacketResponder: BP-880503148-172.17.0.2-1731308468769:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34231] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:01:22,516 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1859313216_22 at /127.0.0.1:42854 [Receiving block BP-880503148-172.17.0.2-1731308468769:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34231:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42854 dst: /127.0.0.1:34231 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T07:01:22,516 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1859313216_22 at /127.0.0.1:42848 [Receiving block BP-880503148-172.17.0.2-1731308468769:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34231:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42848 dst: /127.0.0.1:34231 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:01:22,516 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1859313216_22 at /127.0.0.1:52838 [Receiving block BP-880503148-172.17.0.2-1731308468769:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45161:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52838 dst: /127.0.0.1:45161 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:01:22,516 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1859313216_22 at /127.0.0.1:52844 [Receiving block BP-880503148-172.17.0.2-1731308468769:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:45161:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52844 dst: /127.0.0.1:45161 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:01:22,516 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_739289855_22 at /127.0.0.1:52814 [Receiving block BP-880503148-172.17.0.2-1731308468769:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:45161:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52814 dst: /127.0.0.1:45161 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:01:22,517 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_739289855_22 at /127.0.0.1:42816 [Receiving block BP-880503148-172.17.0.2-1731308468769:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34231:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42816 dst: /127.0.0.1:34231 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T07:01:22,518 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2835f29c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:01:22,518 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@14a79ae9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:01:22,518 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:01:22,518 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@471bebfc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:01:22,519 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3fd7563{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/hadoop.log.dir/,STOPPED} 2024-11-11T07:01:22,520 WARN [BP-880503148-172.17.0.2-1731308468769 heartbeating to localhost/127.0.0.1:41569 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T07:01:22,520 WARN [BP-880503148-172.17.0.2-1731308468769 heartbeating to localhost/127.0.0.1:41569 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-880503148-172.17.0.2-1731308468769 (Datanode Uuid 8c75707b-f307-4efb-bc9b-19cfb4a5bb0c) service to localhost/127.0.0.1:41569 2024-11-11T07:01:22,520 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T07:01:22,520 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T07:01:22,521 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/cluster_1896808d-55e0-067a-97b1-464055333fa7/data/data3/current/BP-880503148-172.17.0.2-1731308468769 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:01:22,521 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/cluster_1896808d-55e0-067a-97b1-464055333fa7/data/data4/current/BP-880503148-172.17.0.2-1731308468769 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:01:22,521 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T07:01:22,532 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:01:22,535 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T07:01:22,536 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T07:01:22,536 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T07:01:22,536 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T07:01:22,537 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c07ac8c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/hadoop.log.dir/,AVAILABLE} 2024-11-11T07:01:22,537 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@392000f9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T07:01:22,652 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c23b3ce{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/java.io.tmpdir/jetty-localhost-42095-hadoop-hdfs-3_4_1-tests_jar-_-any-10861597677947599587/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:01:22,652 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ccd5f6b{HTTP/1.1, (http/1.1)}{localhost:42095} 2024-11-11T07:01:22,652 INFO [Time-limited test {}] server.Server(415): Started @166231ms 2024-11-11T07:01:22,654 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T07:01:22,677 WARN [ResponseProcessor for block BP-880503148-172.17.0.2-1731308468769:blk_1073741833_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-880503148-172.17.0.2-1731308468769:blk_1073741833_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:01:22,677 WARN [ResponseProcessor for block BP-880503148-172.17.0.2-1731308468769:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-880503148-172.17.0.2-1731308468769:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:01:22,677 WARN [ResponseProcessor for block BP-880503148-172.17.0.2-1731308468769:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-880503148-172.17.0.2-1731308468769:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:01:22,677 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_739289855_22 at /127.0.0.1:38252 [Receiving block BP-880503148-172.17.0.2-1731308468769:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:45161:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38252 dst: /127.0.0.1:45161 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T07:01:22,677 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1859313216_22 at /127.0.0.1:38236 [Receiving block BP-880503148-172.17.0.2-1731308468769:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:45161:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38236 dst: /127.0.0.1:45161 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:01:22,677 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1859313216_22 at /127.0.0.1:38234 [Receiving block BP-880503148-172.17.0.2-1731308468769:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45161:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38234 dst: /127.0.0.1:45161 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:01:22,686 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@109832d2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:01:22,686 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5fec76d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:01:22,686 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:01:22,687 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d6c2bfb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:01:22,687 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@165d0fad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/hadoop.log.dir/,STOPPED} 2024-11-11T07:01:22,688 WARN [BP-880503148-172.17.0.2-1731308468769 heartbeating to localhost/127.0.0.1:41569 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T07:01:22,688 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T07:01:22,688 WARN [BP-880503148-172.17.0.2-1731308468769 heartbeating to localhost/127.0.0.1:41569 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-880503148-172.17.0.2-1731308468769 (Datanode Uuid 0a7f41fb-f647-4208-a734-53cac447cc83) service to localhost/127.0.0.1:41569 2024-11-11T07:01:22,688 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T07:01:22,689 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/cluster_1896808d-55e0-067a-97b1-464055333fa7/data/data1/current/BP-880503148-172.17.0.2-1731308468769 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:01:22,689 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/cluster_1896808d-55e0-067a-97b1-464055333fa7/data/data2/current/BP-880503148-172.17.0.2-1731308468769 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:01:22,690 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T07:01:22,697 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:01:22,699 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T07:01:22,700 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T07:01:22,700 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T07:01:22,700 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T07:01:22,701 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3237c5f1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/hadoop.log.dir/,AVAILABLE} 2024-11-11T07:01:22,701 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50ba6dae{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T07:01:22,749 WARN [Thread-1331 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T07:01:22,751 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc0025a6eb5cb6138 with lease ID 0xaced1f65c36f4d27: from storage DS-cc5f5bb1-4040-439b-aebf-559025392dc9 node DatanodeRegistration(127.0.0.1:40569, datanodeUuid=8c75707b-f307-4efb-bc9b-19cfb4a5bb0c, infoPort=41359, infoSecurePort=0, ipcPort=33269, storageInfo=lv=-57;cid=testClusterID;nsid=476746548;c=1731308468769), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:01:22,751 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc0025a6eb5cb6138 with lease ID 0xaced1f65c36f4d27: from storage DS-2b708c8b-f0c2-4169-b431-4db5714d5e16 node DatanodeRegistration(127.0.0.1:40569, datanodeUuid=8c75707b-f307-4efb-bc9b-19cfb4a5bb0c, infoPort=41359, infoSecurePort=0, ipcPort=33269, storageInfo=lv=-57;cid=testClusterID;nsid=476746548;c=1731308468769), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:01:22,817 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1b68c165{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/java.io.tmpdir/jetty-localhost-34399-hadoop-hdfs-3_4_1-tests_jar-_-any-15413987865040074170/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:01:22,817 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@bee8090{HTTP/1.1, (http/1.1)}{localhost:34399} 2024-11-11T07:01:22,817 INFO [Time-limited test {}] server.Server(415): Started @166396ms 2024-11-11T07:01:22,818 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T07:01:22,959 WARN [Thread-1362 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T07:01:22,961 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd19dc39d9a4aea47 with lease ID 0xaced1f65c36f4d28: from storage DS-5f2f2062-f0de-4847-951f-d3ab6291782d node DatanodeRegistration(127.0.0.1:35475, datanodeUuid=0a7f41fb-f647-4208-a734-53cac447cc83, infoPort=33193, infoSecurePort=0, ipcPort=39483, storageInfo=lv=-57;cid=testClusterID;nsid=476746548;c=1731308468769), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:01:22,961 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd19dc39d9a4aea47 with lease ID 0xaced1f65c36f4d28: from storage DS-33da8be0-11b8-4944-8536-65e9ee1f4458 node DatanodeRegistration(127.0.0.1:35475, datanodeUuid=0a7f41fb-f647-4208-a734-53cac447cc83, infoPort=33193, infoSecurePort=0, ipcPort=39483, storageInfo=lv=-57;cid=testClusterID;nsid=476746548;c=1731308468769), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:01:23,134 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:23,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:23,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:23,836 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-11T07:01:23,839 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-11T07:01:23,840 ERROR [FSHLog-0-hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634-prefix:f334e8b0fbdf,37089,1731308469485 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45161,DS-5f2f2062-f0de-4847-951f-d3ab6291782d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:01:23,840 WARN [FSHLog-0-hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634-prefix:f334e8b0fbdf,37089,1731308469485 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45161,DS-5f2f2062-f0de-4847-951f-d3ab6291782d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
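The appendAndSync failure above ("All datanodes ... are bad. Aborting...") is what drives the roll requested in the next entry: the WAL gives up on the current writer, opens a new file, and only best-effort closes the old one (hence the later "close old writer failed" warning). A generic roll-on-append-failure sketch using plain java.nio.file, not the actual FSHLog implementation; the RollingWriterSketch name and file naming are illustrative:

    import java.io.BufferedWriter;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;

    public class RollingWriterSketch {
      private final Path dir;
      private BufferedWriter current;
      private Path currentFile;

      RollingWriterSketch(Path dir) throws IOException {
        this.dir = dir;
        roll();
      }

      // Append one entry; if the underlying writer fails, roll to a new file and rethrow.
      void append(String entry) throws IOException {
        try {
          current.write(entry);
          current.newLine();
          current.flush();
        } catch (IOException e) {
          roll(); // switch writers before surfacing the failure
          throw e;
        }
      }

      // Open a new log file, then best-effort close the old one. Close failures are
      // non-fatal here, mirroring the "close old writer failed" warning in the log.
      private void roll() throws IOException {
        BufferedWriter old = current;
        currentFile = dir.resolve("wal." + System.currentTimeMillis());
        current = Files.newBufferedWriter(currentFile, StandardCharsets.UTF_8);
        if (old != null) {
          try {
            old.close();
          } catch (IOException ignored) {
            // the old writer may be unrecoverable; the real WAL falls back to lease recovery
          }
        }
      }
    }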
2024-11-11T07:01:23,841 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f334e8b0fbdf%2C37089%2C1731308469485:(num 1731308469880) roll requested 2024-11-11T07:01:23,841 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C37089%2C1731308469485.1731308483841 2024-11-11T07:01:23,847 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308469880 newFile=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308483841 2024-11-11T07:01:23,847 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:23,847 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:23,847 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:23,847 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:23,847 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:23,847 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308469880 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308483841 2024-11-11T07:01:23,848 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45161,DS-5f2f2062-f0de-4847-951f-d3ab6291782d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:01:23,848 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45161,DS-5f2f2062-f0de-4847-951f-d3ab6291782d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
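After the failed close, the old WAL file is handed to lease recovery, which is what the entries that follow record: "Recover lease on dfs file", the NameNode reporting "Lease recovery is in progress", and periodic "Failed to recover lease, attempt=..." retries. A minimal sketch of that recover-then-poll pattern, assuming a DistributedFileSystem handle; recoverLease(Path) and isFileClosed(Path) are real DistributedFileSystem methods, but the timeouts, backoff, and helper name are illustrative rather than the RecoverLeaseFSUtils code:

    import java.io.IOException;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class WalLeaseRecoverySketch {
      // Asks the NameNode to recover the lease on an abandoned WAL, then polls
      // until the file is closed or the deadline passes. Illustrative only.
      static boolean recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
          throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        int attempt = 0;
        while (System.currentTimeMillis() < deadline) {
          // recoverLease returns true once the file is closed and the lease released.
          if (dfs.recoverLease(wal)) {
            return true;
          }
          attempt++;
          // isFileClosed is a cheaper probe between recoverLease calls; it throws
          // IOException("Filesystem closed") if the client itself has been shut down,
          // which is exactly what the earlier warnings record.
          if (dfs.isFileClosed(wal)) {
            return true;
          }
          Thread.sleep(Math.min(1000L * attempt, 4000L));
        }
        return false;
      }
    }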
2024-11-11T07:01:23,848 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308469880 2024-11-11T07:01:23,848 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33193:33193),(127.0.0.1/127.0.0.1:41359:41359)] 2024-11-11T07:01:23,848 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308469880 is not closed yet, will try archiving it next time 2024-11-11T07:01:23,848 WARN [IPC Server handler 3 on default port 41569 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308469880 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1013 2024-11-11T07:01:23,849 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308469880 after 1ms 2024-11-11T07:01:24,134 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:24,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:24,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:24,752 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1013: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-11T07:01:25,135 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:25,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:25,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:25,851 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-11T07:01:26,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:26,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:26,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:27,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:27,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:27,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:27,849 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308469880 after 4001ms 2024-11-11T07:01:27,854 WARN [ResponseProcessor for block BP-880503148-172.17.0.2-1731308468769:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-880503148-172.17.0.2-1731308468769:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:01:27,855 WARN [DataStreamer for file /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308483841 block BP-880503148-172.17.0.2-1731308468769:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-880503148-172.17.0.2-1731308468769:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35475,DS-5f2f2062-f0de-4847-951f-d3ab6291782d,DISK], DatanodeInfoWithStorage[127.0.0.1:40569,DS-cc5f5bb1-4040-439b-aebf-559025392dc9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35475,DS-5f2f2062-f0de-4847-951f-d3ab6291782d,DISK]) is bad. 2024-11-11T07:01:27,855 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1859313216_22 at /127.0.0.1:48290 [Receiving block BP-880503148-172.17.0.2-1731308468769:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35475:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48290 dst: /127.0.0.1:35475 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:01:27,855 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1859313216_22 at /127.0.0.1:41966 [Receiving block BP-880503148-172.17.0.2-1731308468769:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:40569:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41966 dst: /127.0.0.1:40569 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T07:01:27,856 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1b68c165{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:01:27,857 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@bee8090{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:01:27,857 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:01:27,857 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50ba6dae{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:01:27,857 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3237c5f1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/hadoop.log.dir/,STOPPED} 2024-11-11T07:01:27,859 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T07:01:27,859 WARN [BP-880503148-172.17.0.2-1731308468769 heartbeating to localhost/127.0.0.1:41569 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T07:01:27,859 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T07:01:27,859 WARN [BP-880503148-172.17.0.2-1731308468769 heartbeating to localhost/127.0.0.1:41569 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-880503148-172.17.0.2-1731308468769 (Datanode Uuid 0a7f41fb-f647-4208-a734-53cac447cc83) service to localhost/127.0.0.1:41569 2024-11-11T07:01:27,859 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/cluster_1896808d-55e0-067a-97b1-464055333fa7/data/data1/current/BP-880503148-172.17.0.2-1731308468769 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:01:27,860 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/cluster_1896808d-55e0-067a-97b1-464055333fa7/data/data2/current/BP-880503148-172.17.0.2-1731308468769 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:01:27,860 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T07:01:27,874 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:01:27,877 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T07:01:27,879 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T07:01:27,880 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T07:01:27,880 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T07:01:27,880 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@167fd01b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/hadoop.log.dir/,AVAILABLE} 2024-11-11T07:01:27,881 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@71e80301{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T07:01:27,995 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7d151a18{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/java.io.tmpdir/jetty-localhost-37129-hadoop-hdfs-3_4_1-tests_jar-_-any-4787284951761769333/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:01:27,996 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3e3b369c{HTTP/1.1, (http/1.1)}{localhost:37129} 2024-11-11T07:01:27,996 INFO [Time-limited test {}] server.Server(415): Started @171575ms 2024-11-11T07:01:27,998 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T07:01:28,016 WARN [ResponseProcessor for block BP-880503148-172.17.0.2-1731308468769:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-880503148-172.17.0.2-1731308468769:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:01:28,016 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1859313216_22 at /127.0.0.1:41986 [Receiving block BP-880503148-172.17.0.2-1731308468769:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:40569:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41986 dst: /127.0.0.1:40569 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:01:28,021 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c23b3ce{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:01:28,022 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ccd5f6b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:01:28,022 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:01:28,022 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@392000f9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:01:28,022 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c07ac8c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/hadoop.log.dir/,STOPPED} 2024-11-11T07:01:28,023 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T07:01:28,023 WARN [BP-880503148-172.17.0.2-1731308468769 heartbeating to localhost/127.0.0.1:41569 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T07:01:28,024 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T07:01:28,024 WARN [BP-880503148-172.17.0.2-1731308468769 heartbeating to localhost/127.0.0.1:41569 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-880503148-172.17.0.2-1731308468769 (Datanode Uuid 8c75707b-f307-4efb-bc9b-19cfb4a5bb0c) service to localhost/127.0.0.1:41569 2024-11-11T07:01:28,025 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/cluster_1896808d-55e0-067a-97b1-464055333fa7/data/data3/current/BP-880503148-172.17.0.2-1731308468769 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:01:28,025 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/cluster_1896808d-55e0-067a-97b1-464055333fa7/data/data4/current/BP-880503148-172.17.0.2-1731308468769 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:01:28,025 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T07:01:28,040 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:01:28,044 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T07:01:28,045 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T07:01:28,045 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T07:01:28,045 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T07:01:28,046 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2026736d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/hadoop.log.dir/,AVAILABLE} 2024-11-11T07:01:28,046 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ae00177{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T07:01:28,107 WARN [Thread-1405 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T07:01:28,110 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x72a3b291107555dd with lease ID 0xaced1f65c36f4d29: from storage DS-5f2f2062-f0de-4847-951f-d3ab6291782d node DatanodeRegistration(127.0.0.1:36931, datanodeUuid=0a7f41fb-f647-4208-a734-53cac447cc83, infoPort=39707, infoSecurePort=0, ipcPort=39097, storageInfo=lv=-57;cid=testClusterID;nsid=476746548;c=1731308468769), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-11T07:01:28,110 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x72a3b291107555dd with lease ID 0xaced1f65c36f4d29: from storage DS-33da8be0-11b8-4944-8536-65e9ee1f4458 node DatanodeRegistration(127.0.0.1:36931, datanodeUuid=0a7f41fb-f647-4208-a734-53cac447cc83, infoPort=39707, infoSecurePort=0, ipcPort=39097, storageInfo=lv=-57;cid=testClusterID;nsid=476746548;c=1731308468769), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:01:28,137 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:28,173 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2392cae{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/java.io.tmpdir/jetty-localhost-42005-hadoop-hdfs-3_4_1-tests_jar-_-any-5461338963835968070/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:01:28,173 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@29d5ccf{HTTP/1.1, (http/1.1)}{localhost:42005} 2024-11-11T07:01:28,173 INFO [Time-limited test {}] server.Server(415): Started @171752ms 2024-11-11T07:01:28,175 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T07:01:28,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:28,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:28,263 WARN [Thread-1436 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T07:01:28,265 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x21dc16ee22e4bf93 with lease ID 0xaced1f65c36f4d2a: from storage DS-cc5f5bb1-4040-439b-aebf-559025392dc9 node DatanodeRegistration(127.0.0.1:46361, datanodeUuid=8c75707b-f307-4efb-bc9b-19cfb4a5bb0c, infoPort=43923, infoSecurePort=0, ipcPort=44671, storageInfo=lv=-57;cid=testClusterID;nsid=476746548;c=1731308468769), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:01:28,265 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x21dc16ee22e4bf93 with lease ID 0xaced1f65c36f4d2a: from storage DS-2b708c8b-f0c2-4169-b431-4db5714d5e16 node DatanodeRegistration(127.0.0.1:46361, datanodeUuid=8c75707b-f307-4efb-bc9b-19cfb4a5bb0c, infoPort=43923, infoSecurePort=0, ipcPort=44671, storageInfo=lv=-57;cid=testClusterID;nsid=476746548;c=1731308468769), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:01:29,137 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:29,193 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-11T07:01:29,196 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-11T07:01:29,198 ERROR [FSHLog-0-hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634-prefix:f334e8b0fbdf,37089,1731308469485 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40569,DS-cc5f5bb1-4040-439b-aebf-559025392dc9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:01:29,198 WARN [FSHLog-0-hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634-prefix:f334e8b0fbdf,37089,1731308469485 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40569,DS-cc5f5bb1-4040-439b-aebf-559025392dc9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T07:01:29,198 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f334e8b0fbdf%2C37089%2C1731308469485:(num 1731308483841) roll requested 2024-11-11T07:01:29,199 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C37089%2C1731308469485.1731308489198 2024-11-11T07:01:29,209 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308483841 newFile=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308489198 2024-11-11T07:01:29,210 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:29,210 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:29,210 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:29,210 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:29,210 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:29,211 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308483841 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308489198 2024-11-11T07:01:29,211 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40569,DS-cc5f5bb1-4040-439b-aebf-559025392dc9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:01:29,211 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40569,DS-cc5f5bb1-4040-439b-aebf-559025392dc9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T07:01:29,211 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308483841 2024-11-11T07:01:29,212 WARN [IPC Server handler 1 on default port 41569 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308483841 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-11T07:01:29,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:29,213 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308483841 after 2ms 2024-11-11T07:01:29,217 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39707:39707),(127.0.0.1/127.0.0.1:43923:43923)] 2024-11-11T07:01:29,217 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308483841 is not closed yet, will try archiving it next time 2024-11-11T07:01:29,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:30,109 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-11T07:01:30,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:30,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:30,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:31,139 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:31,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:31,219 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C37089%2C1731308469485.1731308491219 2024-11-11T07:01:31,224 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308489198 newFile=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308491219 2024-11-11T07:01:31,224 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:31,224 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:31,225 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:31,225 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:31,225 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:31,225 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308489198 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308491219 2024-11-11T07:01:31,226 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39707:39707),(127.0.0.1/127.0.0.1:43923:43923)] 2024-11-11T07:01:31,226 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308483841 is not closed yet, will try archiving it next time 2024-11-11T07:01:31,226 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308489198 is not closed yet, will try archiving it next time 2024-11-11T07:01:31,226 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308469880 2024-11-11T07:01:31,226 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308469880 2024-11-11T07:01:31,227 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308469880 after 1ms 2024-11-11T07:01:31,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46361 is added to blk_1073741838_1019 (size=1264) 2024-11-11T07:01:31,227 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308469880 2024-11-11T07:01:31,227 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36931 is added to blk_1073741838_1019 (size=1264) 2024-11-11T07:01:31,228 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308483841 is not closed yet, will try archiving it next time 2024-11-11T07:01:31,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:31,236 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731308470771/Put/vlen=218/seqid=0] 2024-11-11T07:01:31,236 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731308480513/Put/vlen=1045/seqid=0] 2024-11-11T07:01:31,236 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308469880 2024-11-11T07:01:31,236 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308483841 2024-11-11T07:01:31,236 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308483841 2024-11-11T07:01:31,237 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308483841 after 1ms 2024-11-11T07:01:31,237 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308483841 2024-11-11T07:01:31,240 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731308483840/Put/vlen=1045/seqid=0] 2024-11-11T07:01:31,240 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731308485852/Put/vlen=1045/seqid=0] 2024-11-11T07:01:31,240 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308483841 2024-11-11T07:01:31,240 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308489198 2024-11-11T07:01:31,240 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308489198 2024-11-11T07:01:31,240 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308489198 after 0ms 2024-11-11T07:01:31,240 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308489198 2024-11-11T07:01:31,244 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731308489197/Put/vlen=1045/seqid=0] 2024-11-11T07:01:31,244 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for 
hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308491219 2024-11-11T07:01:31,244 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308491219 2024-11-11T07:01:31,244 WARN [IPC Server handler 4 on default port 41569 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308491219 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-11T07:01:31,244 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308491219 after 0ms 2024-11-11T07:01:32,110 WARN [ResponseProcessor for block BP-880503148-172.17.0.2-1731308468769:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-880503148-172.17.0.2-1731308468769:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:01:32,110 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_739289855_22 at /127.0.0.1:40012 [Receiving block BP-880503148-172.17.0.2-1731308468769:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:36931:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40012 dst: /127.0.0.1:36931 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:36931 remote=/127.0.0.1:40012]. Total timeout mills is 60000, 59113 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:01:32,111 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_739289855_22 at /127.0.0.1:57286 [Receiving block BP-880503148-172.17.0.2-1731308468769:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:46361:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57286 dst: /127.0.0.1:46361 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:01:32,111 WARN [DataStreamer for file /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308491219 block BP-880503148-172.17.0.2-1731308468769:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-880503148-172.17.0.2-1731308468769:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36931,DS-5f2f2062-f0de-4847-951f-d3ab6291782d,DISK], DatanodeInfoWithStorage[127.0.0.1:46361,DS-cc5f5bb1-4040-439b-aebf-559025392dc9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36931,DS-5f2f2062-f0de-4847-951f-d3ab6291782d,DISK]) is bad. 
2024-11-11T07:01:32,112 WARN [DataStreamer for file /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308491219 block BP-880503148-172.17.0.2-1731308468769:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-880503148-172.17.0.2-1731308468769:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T07:01:32,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36931 is added to blk_1073741839_1022 (size=85) 2024-11-11T07:01:32,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46361 is added to blk_1073741839_1022 (size=85) 2024-11-11T07:01:32,139 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:32,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:32,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:33,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:33,214 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308483841 after 4003ms 2024-11-11T07:01:33,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:33,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:34,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:34,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:34,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:35,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:35,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:35,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:35,246 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308491219 after 4002ms 2024-11-11T07:01:35,246 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308491219 2024-11-11T07:01:35,252 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308491219 2024-11-11T07:01:35,253 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-11T07:01:35,253 ERROR [FSHLog-0-hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634-prefix:f334e8b0fbdf,37089,1731308469485.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45161,DS-5f2f2062-f0de-4847-951f-d3ab6291782d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:01:35,254 WARN [FSHLog-0-hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634-prefix:f334e8b0fbdf,37089,1731308469485.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45161,DS-5f2f2062-f0de-4847-951f-d3ab6291782d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
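The repeated "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings above, followed by "Recovered lease, attempt=1 ... after 4003ms", come from the close-writer path polling the NameNode until the old WAL file is reported closed. A minimal sketch of that polling loop, assuming simplified signatures; the helper name, the fixed one-second back-off, and the timeout parameter are illustrative and not the actual RecoverLeaseFSUtils code:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      // Hypothetical helper mirroring the attempt=N / "after Nms" messages in the log.
      static boolean recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
          throws Exception {
        long start = System.currentTimeMillis();
        while (System.currentTimeMillis() - start < timeoutMs) {
          // Ask the NameNode to start lease recovery; true means the file is already closed.
          if (dfs.recoverLease(wal)) {
            return true;
          }
          try {
            // Poll isFileClosed(); this is the reflective call that throws
            // "java.io.IOException: Filesystem closed" once the DFSClient is shut down.
            if (dfs.isFileClosed(wal)) {
              return true;
            }
          } catch (java.io.IOException e) {
            // Logged as "Failed invocation for <wal path>" and retried on the next pass.
          }
          Thread.sleep(1000L); // roughly one second between attempts, as in the timestamps above
        }
        return false;
      }
    }

Once the underlying DFSClient has been closed, every isFileClosed call fails the same way, which is why the identical stack trace repeats about once per second for each of the three WAL files under /user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs.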
2024-11-11T07:01:35,254 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f334e8b0fbdf%2C37089%2C1731308469485.meta:.meta(num 1731308470276) roll requested 2024-11-11T07:01:35,254 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C37089%2C1731308469485.meta.1731308495254.meta 2024-11-11T07:01:35,280 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:35,281 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:35,281 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:35,281 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:35,281 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:35,281 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.meta.1731308470276.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.meta.1731308495254.meta 2024-11-11T07:01:35,284 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45161,DS-5f2f2062-f0de-4847-951f-d3ab6291782d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:01:35,285 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45161,DS-5f2f2062-f0de-4847-951f-d3ab6291782d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
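The sequence above, "roll requested", "Rolled WAL ... with entries=8", then "Failed to write trailer, non-fatal" and "close old writer failed" on the Close-WAL-Writer-0 thread, shows the roll swapping in a new writer first and closing the old one in the background, tolerating failures on the old pipeline. A minimal sketch of that hand-off under an assumed WalWriter interface; the interface and class names here are hypothetical stand-ins, not the real AbstractFSWAL types:

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public final class WalRollSketch {
      /** Hypothetical writer abstraction standing in for the real WAL writer. */
      interface WalWriter extends Closeable {
        void writeTrailer() throws IOException;
      }

      private final ExecutorService closeExecutor =
          Executors.newSingleThreadExecutor(r -> new Thread(r, "Close-WAL-Writer-0"));
      private volatile WalWriter current;

      /** Swap in the new writer immediately, then close the old one off the roll path. */
      void roll(WalWriter newWriter) {
        WalWriter old = current;
        current = newWriter;            // appends go to the new file from here on
        if (old == null) {
          return;
        }
        closeExecutor.execute(() -> {
          try {
            old.writeTrailer();          // best effort: "Failed to write trailer, non-fatal"
          } catch (IOException e) {
            // ignored, matching the WARN above
          }
          try {
            old.close();                 // "close old writer failed" when the pipeline is bad
          } catch (IOException e) {
            // lease recovery on the old file is attempted afterwards
          }
        });
      }
    }

Keeping the close off the roll path is what lets the region server keep appending to the new file even while the old block is still under recovery, which is why the log notes the old file "is not closed yet, will try archiving it next time".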
2024-11-11T07:01:35,285 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.meta.1731308470276.meta 2024-11-11T07:01:35,285 WARN [IPC Server handler 4 on default port 41569 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.meta.1731308470276.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1014 2024-11-11T07:01:35,286 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.meta.1731308470276.meta after 1ms 2024-11-11T07:01:35,289 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43923:43923),(127.0.0.1/127.0.0.1:39707:39707)] 2024-11-11T07:01:35,289 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.meta.1731308470276.meta is not closed yet, will try archiving it next time 2024-11-11T07:01:35,308 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/hbase/meta/1588230740/.tmp/info/b9668e52d6204fe6887cced0bdd91c4a is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731308470422.9023fa18b668ef8af88ffa7953d2b788./info:regioninfo/1731308470776/Put/seqid=0 2024-11-11T07:01:35,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46361 is added to blk_1073741841_1025 (size=7125) 2024-11-11T07:01:35,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36931 is added to blk_1073741841_1025 (size=7125) 2024-11-11T07:01:35,315 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/hbase/meta/1588230740/.tmp/info/b9668e52d6204fe6887cced0bdd91c4a 2024-11-11T07:01:35,342 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/hbase/meta/1588230740/.tmp/ns/14c36082537c467d9995ac18fd31c8ba is 43, key is default/ns:d/1731308470316/Put/seqid=0 2024-11-11T07:01:35,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46361 is added to blk_1073741842_1026 (size=5153) 2024-11-11T07:01:35,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36931 is added to blk_1073741842_1026 (size=5153) 2024-11-11T07:01:35,365 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/hbase/meta/1588230740/.tmp/ns/14c36082537c467d9995ac18fd31c8ba 2024-11-11T07:01:35,398 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/hbase/meta/1588230740/.tmp/table/ef38313d56c9455cb7a5edbc3a1b1471 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731308470786/Put/seqid=0 2024-11-11T07:01:35,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36931 is added to blk_1073741843_1027 (size=5438) 2024-11-11T07:01:35,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46361 is added to blk_1073741843_1027 (size=5438) 2024-11-11T07:01:35,417 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/hbase/meta/1588230740/.tmp/table/ef38313d56c9455cb7a5edbc3a1b1471 2024-11-11T07:01:35,425 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/hbase/meta/1588230740/.tmp/info/b9668e52d6204fe6887cced0bdd91c4a as hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/hbase/meta/1588230740/info/b9668e52d6204fe6887cced0bdd91c4a 2024-11-11T07:01:35,430 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/hbase/meta/1588230740/info/b9668e52d6204fe6887cced0bdd91c4a, entries=10, sequenceid=11, filesize=7.0 K 2024-11-11T07:01:35,432 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/hbase/meta/1588230740/.tmp/ns/14c36082537c467d9995ac18fd31c8ba as hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/hbase/meta/1588230740/ns/14c36082537c467d9995ac18fd31c8ba 2024-11-11T07:01:35,437 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/hbase/meta/1588230740/ns/14c36082537c467d9995ac18fd31c8ba, entries=2, sequenceid=11, filesize=5.0 K 2024-11-11T07:01:35,438 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/hbase/meta/1588230740/.tmp/table/ef38313d56c9455cb7a5edbc3a1b1471 as hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/hbase/meta/1588230740/table/ef38313d56c9455cb7a5edbc3a1b1471 2024-11-11T07:01:35,445 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/hbase/meta/1588230740/table/ef38313d56c9455cb7a5edbc3a1b1471, entries=2, sequenceid=11, filesize=5.3 K 2024-11-11T07:01:35,447 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 193ms, sequenceid=11, compaction requested=false 2024-11-11T07:01:35,447 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-11T07:01:35,447 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 9023fa18b668ef8af88ffa7953d2b788 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-11T07:01:35,447 ERROR [FSHLog-0-hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634-prefix:f334e8b0fbdf,37089,1731308469485 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-880503148-172.17.0.2-1731308468769:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:01:35,448 WARN [FSHLog-0-hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634-prefix:f334e8b0fbdf,37089,1731308469485 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-880503148-172.17.0.2-1731308468769:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
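The flush of 1588230740 above follows a two-step pattern: each family's data is first written to a file under the region's .tmp directory ("Flushed memstore data size=... to=.../.tmp/info/b9668e52d6204fe6887cced0bdd91c4a"), and only then committed into the family directory ("Committing ... as .../info/...", then "Added ..., entries=10"). A minimal sketch of that commit step; the method name and paths are hypothetical, and the real code goes through HRegionFileSystem rather than a bare FileSystem:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class FlushCommitSketch {
      /**
       * Move a freshly written flush file from the region's .tmp directory into the
       * column family directory. Writing to .tmp first and renaming afterwards means
       * readers never observe a partially written HFile.
       */
      static Path commitFlushedFile(FileSystem fs, Path regionDir, String family, Path tmpFile)
          throws IOException {
        Path familyDir = new Path(regionDir, family);
        if (!fs.exists(familyDir)) {
          fs.mkdirs(familyDir);
        }
        Path dest = new Path(familyDir, tmpFile.getName());
        // A single-file rename on HDFS is atomic, which is what makes the commit safe.
        if (!fs.rename(tmpFile, dest)) {
          throw new IOException("Failed to commit " + tmpFile + " to " + dest);
        }
        return dest;
      }
    }

The flush status line "Finished flush of dataSize ~1.74 KB ... in 193ms, sequenceid=11" is emitted only after all three families (info, ns, table) have been committed this way.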
2024-11-11T07:01:35,448 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f334e8b0fbdf%2C37089%2C1731308469485:(num 1731308491219) roll requested 2024-11-11T07:01:35,448 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C37089%2C1731308469485.1731308495448 2024-11-11T07:01:35,459 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308491219 newFile=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308495448 2024-11-11T07:01:35,459 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:35,460 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:35,460 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:35,464 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:35,464 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:35,465 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308491219 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308495448 2024-11-11T07:01:35,465 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-880503148-172.17.0.2-1731308468769:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:01:35,465 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-880503148-172.17.0.2-1731308468769:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:01:35,465 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308491219 2024-11-11T07:01:35,466 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308491219 after 1ms 2024-11-11T07:01:35,481 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.1731308491219 to hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/oldWALs/f334e8b0fbdf%2C37089%2C1731308469485.1731308491219 2024-11-11T07:01:35,482 DEBUG [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43923:43923),(127.0.0.1/127.0.0.1:39707:39707)] 2024-11-11T07:01:35,503 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/default/TestLogRolling-testLogRollOnPipelineRestart/9023fa18b668ef8af88ffa7953d2b788/.tmp/info/966406263de545a9bfca2445133b115e is 1080, key is row1002/info:/1731308480513/Put/seqid=0 2024-11-11T07:01:35,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46361 is added to blk_1073741845_1029 (size=9270) 2024-11-11T07:01:35,518 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/default/TestLogRolling-testLogRollOnPipelineRestart/9023fa18b668ef8af88ffa7953d2b788/.tmp/info/966406263de545a9bfca2445133b115e 2024-11-11T07:01:35,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36931 is added to blk_1073741845_1029 (size=9270) 2024-11-11T07:01:35,529 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/default/TestLogRolling-testLogRollOnPipelineRestart/9023fa18b668ef8af88ffa7953d2b788/.tmp/info/966406263de545a9bfca2445133b115e as hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/default/TestLogRolling-testLogRollOnPipelineRestart/9023fa18b668ef8af88ffa7953d2b788/info/966406263de545a9bfca2445133b115e 2024-11-11T07:01:35,538 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/default/TestLogRolling-testLogRollOnPipelineRestart/9023fa18b668ef8af88ffa7953d2b788/info/966406263de545a9bfca2445133b115e, entries=4, sequenceid=8, filesize=9.1 K 2024-11-11T07:01:35,541 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 9023fa18b668ef8af88ffa7953d2b788 in 94ms, sequenceid=8, compaction requested=false 2024-11-11T07:01:35,541 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 9023fa18b668ef8af88ffa7953d2b788: 2024-11-11T07:01:35,547 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T07:01:35,547 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T07:01:35,547 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T07:01:35,547 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:01:35,548 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-11T07:01:35,548 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:01:35,548 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T07:01:35,548 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1498404850, stopped=false 2024-11-11T07:01:35,548 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=f334e8b0fbdf,41265,1731308469439 2024-11-11T07:01:35,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T07:01:35,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:35,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37089-0x10105d4a73e0001, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T07:01:35,550 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T07:01:35,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37089-0x10105d4a73e0001, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:35,551 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
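The call stack above traces the shutdown back to the test's @After method (AbstractTestLogRolling.tearDown calling HBaseTestingUtil.shutdownMiniCluster), which is what kicks off everything that follows. A minimal sketch of that setup/teardown pairing, assuming a standalone test class (the class and field names below are illustrative, not taken from this log):

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class MiniClusterLifecycleSketch {
  // Assumed fixture; the real test creates and starts its HBaseTestingUtil elsewhere.
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    testUtil.startMiniCluster(); // mini DFS, ZooKeeper, one master, one region server
  }

  @After
  public void tearDown() throws Exception {
    // Closes the shared async connection, then stops the master, region servers,
    // DFS and ZooKeeper, producing the shutdown sequence recorded below.
    testUtil.shutdownMiniCluster();
  }
}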
2024-11-11T07:01:35,551 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:01:35,551 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T07:01:35,551 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:01:35,551 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37089-0x10105d4a73e0001, quorum=127.0.0.1:56983, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:01:35,551 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'f334e8b0fbdf,37089,1731308469485' ***** 2024-11-11T07:01:35,551 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T07:01:35,552 INFO [RS:0;f334e8b0fbdf:37089 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T07:01:35,552 INFO [RS:0;f334e8b0fbdf:37089 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T07:01:35,552 INFO [RS:0;f334e8b0fbdf:37089 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T07:01:35,552 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T07:01:35,552 INFO [RS:0;f334e8b0fbdf:37089 {}] regionserver.HRegionServer(3091): Received CLOSE for 9023fa18b668ef8af88ffa7953d2b788 2024-11-11T07:01:35,553 INFO [RS:0;f334e8b0fbdf:37089 {}] regionserver.HRegionServer(959): stopping server f334e8b0fbdf,37089,1731308469485 2024-11-11T07:01:35,553 INFO [RS:0;f334e8b0fbdf:37089 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T07:01:35,553 INFO [RS:0;f334e8b0fbdf:37089 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;f334e8b0fbdf:37089. 2024-11-11T07:01:35,553 DEBUG [RS:0;f334e8b0fbdf:37089 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T07:01:35,553 DEBUG [RS:0;f334e8b0fbdf:37089 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:01:35,553 INFO [RS:0;f334e8b0fbdf:37089 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T07:01:35,553 INFO [RS:0;f334e8b0fbdf:37089 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-11-11T07:01:35,553 INFO [RS:0;f334e8b0fbdf:37089 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T07:01:35,553 INFO [RS:0;f334e8b0fbdf:37089 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T07:01:35,554 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 9023fa18b668ef8af88ffa7953d2b788, disabling compactions & flushes 2024-11-11T07:01:35,554 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731308470422.9023fa18b668ef8af88ffa7953d2b788. 2024-11-11T07:01:35,554 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731308470422.9023fa18b668ef8af88ffa7953d2b788. 2024-11-11T07:01:35,554 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731308470422.9023fa18b668ef8af88ffa7953d2b788. after waiting 0 ms 2024-11-11T07:01:35,554 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731308470422.9023fa18b668ef8af88ffa7953d2b788. 2024-11-11T07:01:35,560 INFO [RS:0;f334e8b0fbdf:37089 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-11T07:01:35,560 DEBUG [RS:0;f334e8b0fbdf:37089 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 9023fa18b668ef8af88ffa7953d2b788=TestLogRolling-testLogRollOnPipelineRestart,,1731308470422.9023fa18b668ef8af88ffa7953d2b788.} 2024-11-11T07:01:35,560 DEBUG [RS:0;f334e8b0fbdf:37089 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 9023fa18b668ef8af88ffa7953d2b788 2024-11-11T07:01:35,560 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T07:01:35,561 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T07:01:35,561 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T07:01:35,561 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T07:01:35,561 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T07:01:35,565 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/default/TestLogRolling-testLogRollOnPipelineRestart/9023fa18b668ef8af88ffa7953d2b788/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-11T07:01:35,566 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed 
TestLogRolling-testLogRollOnPipelineRestart,,1731308470422.9023fa18b668ef8af88ffa7953d2b788. 2024-11-11T07:01:35,566 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 9023fa18b668ef8af88ffa7953d2b788: Waiting for close lock at 1731308495554Running coprocessor pre-close hooks at 1731308495554Disabling compacts and flushes for region at 1731308495554Disabling writes for close at 1731308495554Writing region close event to WAL at 1731308495561 (+7 ms)Running coprocessor post-close hooks at 1731308495566 (+5 ms)Closed at 1731308495566 2024-11-11T07:01:35,566 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731308470422.9023fa18b668ef8af88ffa7953d2b788. 2024-11-11T07:01:35,573 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-11T07:01:35,573 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T07:01:35,573 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T07:01:35,573 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731308495560Running coprocessor pre-close hooks at 1731308495560Disabling compacts and flushes for region at 1731308495560Disabling writes for close at 1731308495561 (+1 ms)Writing region close event to WAL at 1731308495566 (+5 ms)Running coprocessor post-close hooks at 1731308495573 (+7 ms)Closed at 1731308495573 2024-11-11T07:01:35,574 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T07:01:35,748 INFO [regionserver/f334e8b0fbdf:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T07:01:35,761 INFO [RS:0;f334e8b0fbdf:37089 {}] regionserver.HRegionServer(976): stopping server f334e8b0fbdf,37089,1731308469485; all regions closed. 
2024-11-11T07:01:35,761 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:35,761 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:35,762 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:35,762 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:35,762 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:35,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36931 is added to blk_1073741840_1023 (size=825) 2024-11-11T07:01:35,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46361 is added to blk_1073741840_1023 (size=825) 2024-11-11T07:01:35,834 INFO [regionserver/f334e8b0fbdf:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T07:01:35,834 INFO [regionserver/f334e8b0fbdf:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T07:01:36,142 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:36,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:36,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:37,142 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:37,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:37,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:38,143 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:38,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:38,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:39,143 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:39,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:39,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:39,265 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
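The run of RecoverLeaseFSUtils(258) warnings above all fail the same way: the lease-recovery helper keeps probing a DistributedFileSystem whose underlying DFSClient has already been closed, so every probe throws "Filesystem closed". A minimal sketch of the recover-then-poll pattern being exercised (an illustration, not the HBase implementation; the timeout and sleep values are placeholders):

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  // Ask the NameNode to recover the lease on a file, then poll until the file is
  // reported closed. If the DFSClient behind the FileSystem has been shut down,
  // isFileClosed() throws IOException("Filesystem closed"), matching the warnings above.
  static boolean recoverLease(DistributedFileSystem dfs, Path path, long timeoutMs)
      throws IOException, InterruptedException {
    if (dfs.recoverLease(path)) {
      return true; // lease recovered and file already closed
    }
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if (dfs.isFileClosed(path)) {
          return true;
        }
      } catch (IOException e) {
        // Mirrors the WARN lines above: log the failed probe and keep retrying.
        System.err.println("isFileClosed probe failed: " + e.getMessage());
      }
      Thread.sleep(1000L); // pause between probes, roughly the cadence seen in the log
    }
    return false; // caller decides whether to keep waiting or give up
  }
}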
2024-11-11T07:01:39,286 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.meta.1731308470276.meta after 4001ms 2024-11-11T07:01:39,287 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/WALs/f334e8b0fbdf,37089,1731308469485/f334e8b0fbdf%2C37089%2C1731308469485.meta.1731308470276.meta to hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/oldWALs/f334e8b0fbdf%2C37089%2C1731308469485.meta.1731308470276.meta 2024-11-11T07:01:39,289 DEBUG [RS:0;f334e8b0fbdf:37089 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/oldWALs 2024-11-11T07:01:39,289 INFO [RS:0;f334e8b0fbdf:37089 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f334e8b0fbdf%2C37089%2C1731308469485.meta:.meta(num 1731308495254) 2024-11-11T07:01:39,290 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:39,290 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:39,290 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:39,290 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:39,290 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:39,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36931 is added to blk_1073741844_1028 (size=1162) 2024-11-11T07:01:39,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46361 is added to blk_1073741844_1028 (size=1162) 2024-11-11T07:01:39,296 DEBUG [RS:0;f334e8b0fbdf:37089 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/oldWALs 2024-11-11T07:01:39,296 INFO [RS:0;f334e8b0fbdf:37089 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f334e8b0fbdf%2C37089%2C1731308469485:(num 1731308495448) 2024-11-11T07:01:39,296 DEBUG [RS:0;f334e8b0fbdf:37089 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:01:39,296 INFO [RS:0;f334e8b0fbdf:37089 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T07:01:39,296 INFO [RS:0;f334e8b0fbdf:37089 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T07:01:39,297 INFO [RS:0;f334e8b0fbdf:37089 {}] hbase.ChoreService(370): Chore service for: regionserver/f334e8b0fbdf:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T07:01:39,297 INFO [RS:0;f334e8b0fbdf:37089 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T07:01:39,297 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
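For reference, the WAL archiving just logged is the tail end of a log roll: the region server closes the old writer, moves the finished file into oldWALs, and carries on with a fresh one. A roll can also be requested explicitly through the Admin API; a hedged sketch (not necessarily how this test triggers its rolls), assuming an existing Admin handle, with the server name values copied from the log lines above:

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;

public class WalRollSketch {
  static void rollWal(Admin admin) throws Exception {
    // host, port and start code as they appear in this log
    ServerName server = ServerName.valueOf("f334e8b0fbdf", 37089, 1731308469485L);
    // Asks the region server to close its current WAL writer and open a new one;
    // the closed file later becomes eligible for the oldWALs archiving seen above.
    admin.rollWALWriter(server);
  }
}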
2024-11-11T07:01:39,297 INFO [RS:0;f334e8b0fbdf:37089 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37089 2024-11-11T07:01:39,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T07:01:39,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37089-0x10105d4a73e0001, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f334e8b0fbdf,37089,1731308469485 2024-11-11T07:01:39,299 INFO [RS:0;f334e8b0fbdf:37089 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T07:01:39,300 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f334e8b0fbdf,37089,1731308469485] 2024-11-11T07:01:39,304 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f334e8b0fbdf,37089,1731308469485 already deleted, retry=false 2024-11-11T07:01:39,304 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f334e8b0fbdf,37089,1731308469485 expired; onlineServers=0 2024-11-11T07:01:39,304 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'f334e8b0fbdf,41265,1731308469439' ***** 2024-11-11T07:01:39,304 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T07:01:39,304 INFO [M:0;f334e8b0fbdf:41265 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T07:01:39,304 INFO [M:0;f334e8b0fbdf:41265 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T07:01:39,304 DEBUG [M:0;f334e8b0fbdf:41265 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T07:01:39,304 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-11T07:01:39,304 DEBUG [M:0;f334e8b0fbdf:41265 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T07:01:39,304 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.small.0-1731308469667 {}] cleaner.HFileCleaner(306): Exit Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.small.0-1731308469667,5,FailOnTimeoutGroup] 2024-11-11T07:01:39,304 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.large.0-1731308469667 {}] cleaner.HFileCleaner(306): Exit Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.large.0-1731308469667,5,FailOnTimeoutGroup] 2024-11-11T07:01:39,304 INFO [M:0;f334e8b0fbdf:41265 {}] hbase.ChoreService(370): Chore service for: master/f334e8b0fbdf:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T07:01:39,305 INFO [M:0;f334e8b0fbdf:41265 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T07:01:39,305 DEBUG [M:0;f334e8b0fbdf:41265 {}] master.HMaster(1795): Stopping service threads 2024-11-11T07:01:39,305 INFO [M:0;f334e8b0fbdf:41265 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T07:01:39,305 INFO [M:0;f334e8b0fbdf:41265 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T07:01:39,305 INFO [M:0;f334e8b0fbdf:41265 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T07:01:39,305 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T07:01:39,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T07:01:39,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:39,306 DEBUG [M:0;f334e8b0fbdf:41265 {}] zookeeper.ZKUtil(347): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T07:01:39,306 WARN [M:0;f334e8b0fbdf:41265 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T07:01:39,307 INFO [M:0;f334e8b0fbdf:41265 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/.lastflushedseqids 2024-11-11T07:01:39,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36931 is added to blk_1073741846_1030 (size=120) 2024-11-11T07:01:39,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46361 is added to blk_1073741846_1030 (size=120) 2024-11-11T07:01:39,316 INFO [M:0;f334e8b0fbdf:41265 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T07:01:39,316 INFO [M:0;f334e8b0fbdf:41265 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T07:01:39,316 DEBUG [M:0;f334e8b0fbdf:41265 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T07:01:39,316 INFO [M:0;f334e8b0fbdf:41265 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:01:39,316 DEBUG [M:0;f334e8b0fbdf:41265 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:01:39,316 DEBUG [M:0;f334e8b0fbdf:41265 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T07:01:39,316 DEBUG [M:0;f334e8b0fbdf:41265 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:01:39,316 INFO [M:0;f334e8b0fbdf:41265 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-11-11T07:01:39,316 ERROR [FSHLog-0-hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData-prefix:f334e8b0fbdf,41265,1731308469439 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45161,DS-5f2f2062-f0de-4847-951f-d3ab6291782d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:01:39,317 WARN [FSHLog-0-hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData-prefix:f334e8b0fbdf,41265,1731308469439 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45161,DS-5f2f2062-f0de-4847-951f-d3ab6291782d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T07:01:39,317 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog f334e8b0fbdf%2C41265%2C1731308469439:(num 1731308469586) roll requested 2024-11-11T07:01:39,317 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C41265%2C1731308469439.1731308499317 2024-11-11T07:01:39,322 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:39,322 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:39,322 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:39,322 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:39,322 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:39,322 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/WALs/f334e8b0fbdf,41265,1731308469439/f334e8b0fbdf%2C41265%2C1731308469439.1731308469586 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/WALs/f334e8b0fbdf,41265,1731308469439/f334e8b0fbdf%2C41265%2C1731308469439.1731308499317 2024-11-11T07:01:39,322 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45161,DS-5f2f2062-f0de-4847-951f-d3ab6291782d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:01:39,323 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45161,DS-5f2f2062-f0de-4847-951f-d3ab6291782d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T07:01:39,323 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/WALs/f334e8b0fbdf,41265,1731308469439/f334e8b0fbdf%2C41265%2C1731308469439.1731308469586 2024-11-11T07:01:39,323 WARN [IPC Server handler 0 on default port 41569 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/WALs/f334e8b0fbdf,41265,1731308469439/f334e8b0fbdf%2C41265%2C1731308469439.1731308469586 has not been closed. 
Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-11T07:01:39,323 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/WALs/f334e8b0fbdf,41265,1731308469439/f334e8b0fbdf%2C41265%2C1731308469439.1731308469586 after 0ms 2024-11-11T07:01:39,324 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43923:43923),(127.0.0.1/127.0.0.1:39707:39707)] 2024-11-11T07:01:39,324 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/WALs/f334e8b0fbdf,41265,1731308469439/f334e8b0fbdf%2C41265%2C1731308469439.1731308469586 is not closed yet, will try archiving it next time 2024-11-11T07:01:39,340 DEBUG [M:0;f334e8b0fbdf:41265 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cf8551cb027f4d1395df5dd0dcfde9ff is 82, key is hbase:meta,,1/info:regioninfo/1731308470300/Put/seqid=0 2024-11-11T07:01:39,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36931 is added to blk_1073741848_1033 (size=5672) 2024-11-11T07:01:39,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46361 is added to blk_1073741848_1033 (size=5672) 2024-11-11T07:01:39,345 INFO [M:0;f334e8b0fbdf:41265 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cf8551cb027f4d1395df5dd0dcfde9ff 2024-11-11T07:01:39,372 DEBUG [M:0;f334e8b0fbdf:41265 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2c58db500afe4c05a80202037eb92078 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731308470790/Put/seqid=0 2024-11-11T07:01:39,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46361 is added to blk_1073741849_1034 (size=6118) 2024-11-11T07:01:39,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36931 is added to blk_1073741849_1034 (size=6118) 2024-11-11T07:01:39,378 INFO [M:0;f334e8b0fbdf:41265 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2c58db500afe4c05a80202037eb92078 2024-11-11T07:01:39,396 DEBUG [M:0;f334e8b0fbdf:41265 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0c5ba2092a9f443d936cbe71993afd0b is 69, key is f334e8b0fbdf,37089,1731308469485/rs:state/1731308469734/Put/seqid=0 2024-11-11T07:01:39,400 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36931 is added to blk_1073741850_1035 (size=5156) 2024-11-11T07:01:39,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46361 is added to blk_1073741850_1035 (size=5156) 2024-11-11T07:01:39,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37089-0x10105d4a73e0001, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:01:39,401 INFO [RS:0;f334e8b0fbdf:37089 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T07:01:39,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37089-0x10105d4a73e0001, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:01:39,401 INFO [RS:0;f334e8b0fbdf:37089 {}] regionserver.HRegionServer(1031): Exiting; stopping=f334e8b0fbdf,37089,1731308469485; zookeeper connection closed. 2024-11-11T07:01:39,401 INFO [M:0;f334e8b0fbdf:41265 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0c5ba2092a9f443d936cbe71993afd0b 2024-11-11T07:01:39,401 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5851bf8c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5851bf8c 2024-11-11T07:01:39,401 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-11T07:01:39,420 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-11T07:01:39,420 DEBUG [M:0;f334e8b0fbdf:41265 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1cfb45a15dad4f708bf71f2ed86f5497 is 52, key is load_balancer_on/state:d/1731308470418/Put/seqid=0 2024-11-11T07:01:39,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46361 is added to blk_1073741851_1036 (size=5056) 2024-11-11T07:01:39,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36931 is added to blk_1073741851_1036 (size=5056) 2024-11-11T07:01:39,425 INFO [M:0;f334e8b0fbdf:41265 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1cfb45a15dad4f708bf71f2ed86f5497 2024-11-11T07:01:39,431 DEBUG [M:0;f334e8b0fbdf:41265 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cf8551cb027f4d1395df5dd0dcfde9ff as hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cf8551cb027f4d1395df5dd0dcfde9ff 2024-11-11T07:01:39,435 INFO [M:0;f334e8b0fbdf:41265 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cf8551cb027f4d1395df5dd0dcfde9ff, entries=8, sequenceid=56, filesize=5.5 K 2024-11-11T07:01:39,436 DEBUG [M:0;f334e8b0fbdf:41265 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2c58db500afe4c05a80202037eb92078 as hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2c58db500afe4c05a80202037eb92078 2024-11-11T07:01:39,440 INFO [M:0;f334e8b0fbdf:41265 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2c58db500afe4c05a80202037eb92078, entries=6, sequenceid=56, filesize=6.0 K 2024-11-11T07:01:39,440 DEBUG [M:0;f334e8b0fbdf:41265 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0c5ba2092a9f443d936cbe71993afd0b as hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0c5ba2092a9f443d936cbe71993afd0b 2024-11-11T07:01:39,445 INFO [M:0;f334e8b0fbdf:41265 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0c5ba2092a9f443d936cbe71993afd0b, entries=1, sequenceid=56, filesize=5.0 K 2024-11-11T07:01:39,446 DEBUG 
[M:0;f334e8b0fbdf:41265 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1cfb45a15dad4f708bf71f2ed86f5497 as hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1cfb45a15dad4f708bf71f2ed86f5497 2024-11-11T07:01:39,450 INFO [M:0;f334e8b0fbdf:41265 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1cfb45a15dad4f708bf71f2ed86f5497, entries=1, sequenceid=56, filesize=4.9 K 2024-11-11T07:01:39,451 INFO [M:0;f334e8b0fbdf:41265 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=56, compaction requested=false 2024-11-11T07:01:39,452 INFO [M:0;f334e8b0fbdf:41265 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:01:39,452 DEBUG [M:0;f334e8b0fbdf:41265 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731308499316Disabling compacts and flushes for region at 1731308499316Disabling writes for close at 1731308499316Obtaining lock to block concurrent updates at 1731308499316Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731308499316Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1731308499317 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731308499325 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731308499325Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731308499339 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731308499339Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731308499350 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731308499372 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731308499372Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731308499383 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731308499396 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731308499396Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731308499406 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731308499420 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731308499420Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@66386f7e: reopening flushed file at 1731308499430 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b924094: reopening flushed file at 1731308499435 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73a45a7c: reopening flushed file at 1731308499440 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3589326a: reopening flushed file at 1731308499445 (+5 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=56, compaction requested=false at 1731308499451 (+6 ms)Writing region close event to WAL at 1731308499452 (+1 ms)Closed at 1731308499452 2024-11-11T07:01:39,453 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:39,453 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:39,453 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:39,453 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:39,453 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:01:39,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36931 is added to blk_1073741847_1031 (size=757) 2024-11-11T07:01:39,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46361 is added to blk_1073741847_1031 (size=757) 2024-11-11T07:01:40,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:40,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 after 68051ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor192.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:01:40,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:40,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:40,567 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:40,567 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:40,581 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:40,581 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:40,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:40,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:40,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:40,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:40,585 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:40,585 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:40,585 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:40,587 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:40,591 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:40,591 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:41,094 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T07:01:41,095 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:41,095 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:41,095 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:41,096 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:41,113 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:41,113 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:41,114 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:41,114 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:41,114 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:41,114 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:41,117 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:41,117 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:41,118 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:41,120 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:41,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:41,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:41,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:41,605 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T07:01:41,605 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-11T07:01:41,606 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-11T07:01:41,606 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-11T07:01:42,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:42,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:42,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:42,265 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-11T07:01:43,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:43,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:43,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:43,324 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/WALs/f334e8b0fbdf,41265,1731308469439/f334e8b0fbdf%2C41265%2C1731308469439.1731308469586 after 4001ms 2024-11-11T07:01:43,325 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/WALs/f334e8b0fbdf,41265,1731308469439/f334e8b0fbdf%2C41265%2C1731308469439.1731308469586 to hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/oldWALs/f334e8b0fbdf%2C41265%2C1731308469439.1731308469586 2024-11-11T07:01:43,329 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/MasterData/oldWALs/f334e8b0fbdf%2C41265%2C1731308469439.1731308469586 to hdfs://localhost:41569/user/jenkins/test-data/b7895072-cd6f-a7bd-1835-1df878d4a634/oldWALs/f334e8b0fbdf%2C41265%2C1731308469439.1731308469586$masterlocalwal$ 2024-11-11T07:01:43,329 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T07:01:43,329 INFO [M:0;f334e8b0fbdf:41265 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
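
The repeated "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings above come from RecoverLeaseFSUtils probing isFileClosed through reflection while the test's DFSClient has already been shut down, so the reflective call surfaces an InvocationTargetException whose cause is the real error. The following is an illustrative sketch of that kind of reflective probe (not the HBase source itself), using only the public Hadoop FileSystem/Path API visible in the stack traces:

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {
  // Returns true only if the filesystem exposes isFileClosed(Path) and reports the file closed.
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      // DistributedFileSystem.isFileClosed(Path) is looked up reflectively so the caller
      // does not need a compile-time dependency on the HDFS-specific subclass.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      // Filesystem implementation does not support the probe.
      return false;
    } catch (InvocationTargetException e) {
      // This is the wrapper logged in the WARN entries above; the useful detail is the cause,
      // e.g. IOException("Filesystem closed") when DFSClient.checkOpen fails during shutdown.
      Throwable cause = e.getCause();
      if (cause instanceof IOException) {
        System.err.println("isFileClosed failed: " + cause.getMessage());
      }
      return false;
    }
  }
}

Under that reading, the warnings are a symptom of the lease-recovery retry loop outliving the client it was probing with, which is why the very next INFO entry can still report a successful "Recovered lease, attempt=1" against the other (still open) filesystem.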
2024-11-11T07:01:43,329 INFO [M:0;f334e8b0fbdf:41265 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41265 2024-11-11T07:01:43,330 INFO [M:0;f334e8b0fbdf:41265 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T07:01:43,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:01:43,434 INFO [M:0;f334e8b0fbdf:41265 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T07:01:43,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41265-0x10105d4a73e0000, quorum=127.0.0.1:56983, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:01:43,437 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2392cae{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:01:43,437 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@29d5ccf{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:01:43,437 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:01:43,437 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ae00177{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:01:43,437 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2026736d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/hadoop.log.dir/,STOPPED} 2024-11-11T07:01:43,438 WARN [BP-880503148-172.17.0.2-1731308468769 heartbeating to localhost/127.0.0.1:41569 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T07:01:43,438 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T07:01:43,439 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T07:01:43,438 WARN [BP-880503148-172.17.0.2-1731308468769 heartbeating to localhost/127.0.0.1:41569 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-880503148-172.17.0.2-1731308468769 (Datanode Uuid 8c75707b-f307-4efb-bc9b-19cfb4a5bb0c) service to localhost/127.0.0.1:41569 2024-11-11T07:01:43,439 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/cluster_1896808d-55e0-067a-97b1-464055333fa7/data/data3/current/BP-880503148-172.17.0.2-1731308468769 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:01:43,439 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/cluster_1896808d-55e0-067a-97b1-464055333fa7/data/data4/current/BP-880503148-172.17.0.2-1731308468769 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:01:43,440 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T07:01:43,448 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7d151a18{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:01:43,448 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e3b369c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:01:43,448 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:01:43,448 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@71e80301{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:01:43,448 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@167fd01b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/hadoop.log.dir/,STOPPED} 2024-11-11T07:01:43,449 WARN [BP-880503148-172.17.0.2-1731308468769 heartbeating to localhost/127.0.0.1:41569 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T07:01:43,449 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T07:01:43,450 WARN [BP-880503148-172.17.0.2-1731308468769 heartbeating to localhost/127.0.0.1:41569 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-880503148-172.17.0.2-1731308468769 (Datanode Uuid 0a7f41fb-f647-4208-a734-53cac447cc83) service to localhost/127.0.0.1:41569 2024-11-11T07:01:43,450 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T07:01:43,450 WARN [BP-880503148-172.17.0.2-1731308468769 heartbeating to localhost/127.0.0.1:41569 {}] datanode.FileIoProvider(249): Failed to delete file /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/cluster_1896808d-55e0-067a-97b1-464055333fa7/data/data1/current/BP-880503148-172.17.0.2-1731308468769/current/dfsUsed 2024-11-11T07:01:43,450 WARN [BP-880503148-172.17.0.2-1731308468769 heartbeating to localhost/127.0.0.1:41569 {}] impl.BlockPoolSlice(395): Failed to delete old dfsUsed file in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/cluster_1896808d-55e0-067a-97b1-464055333fa7/data/data1/current/BP-880503148-172.17.0.2-1731308468769/current 2024-11-11T07:01:43,450 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/cluster_1896808d-55e0-067a-97b1-464055333fa7/data/data1/current/BP-880503148-172.17.0.2-1731308468769 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:01:43,450 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/cluster_1896808d-55e0-067a-97b1-464055333fa7/data/data2/current/BP-880503148-172.17.0.2-1731308468769 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:01:43,450 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T07:01:43,457 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@347a2271{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T07:01:43,457 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@464ea64c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:01:43,457 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:01:43,458 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@662aecf5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:01:43,458 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27a49013{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/hadoop.log.dir/,STOPPED} 2024-11-11T07:01:43,465 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster 
with all ZK servers 2024-11-11T07:01:43,486 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T07:01:43,501 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=180 (was 155) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41569 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41569 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41569 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41569 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41569 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41569 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41569 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:41569 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? 
-, OpenFileDescriptor=457 (was 436) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=38 (was 31) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=8590 (was 9082) 2024-11-11T07:01:43,513 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=180, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=38, ProcessCount=11, AvailableMemoryMB=8590 2024-11-11T07:01:43,513 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T07:01:43,513 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/hadoop.log.dir so I do NOT create it in target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207 2024-11-11T07:01:43,513 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40757b9-66f3-b1ba-cc08-6913d0998b34/hadoop.tmp.dir so I do NOT create it in target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207 2024-11-11T07:01:43,513 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/cluster_cd19cd6a-f412-51cb-7248-d21da25cae5e, deleteOnExit=true 2024-11-11T07:01:43,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T07:01:43,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/test.cache.data in system properties and HBase conf 2024-11-11T07:01:43,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T07:01:43,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/hadoop.log.dir in system properties and HBase conf 2024-11-11T07:01:43,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T07:01:43,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T07:01:43,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 
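
The ResourceChecker and HBaseTestingUtil entries above mark the boundary between testLogRollOnPipelineRestart tearing down and testCompactionRecordDoesntBlockRolling standing up a fresh minicluster with one master, one region server and two datanodes. A hedged sketch of how a test typically requests such a cluster, assuming the StartMiniClusterOption builder API named in the "Starting up minicluster with option" entry (this is illustrative, not taken from TestLogRolling):

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)         // matches numMasters=1 in the log entry
        .numRegionServers(1)   // matches numRegionServers=1
        .numDataNodes(2)       // matches numDataNodes=2
        .numZkServers(1)       // matches numZkServers=1
        .build();
    util.startMiniCluster(option);    // brings up DFS, ZooKeeper and HBase, as in the entries above
    try {
      // test body would run against util.getConnection() here
    } finally {
      util.shutdownMiniCluster();     // produces the "Minicluster is down" teardown entry
    }
  }
}

The subsequent "Setting ... in system properties and HBase conf" entries are the utility redirecting Hadoop and YARN directories into the per-test data directory before the DFS and ZooKeeper processes are started.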
2024-11-11T07:01:43,514 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-11T07:01:43,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T07:01:43,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T07:01:43,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T07:01:43,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T07:01:43,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T07:01:43,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T07:01:43,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T07:01:43,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T07:01:43,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T07:01:43,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/nfs.dump.dir in system properties and HBase conf 2024-11-11T07:01:43,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): 
Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/java.io.tmpdir in system properties and HBase conf 2024-11-11T07:01:43,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T07:01:43,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T07:01:43,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T07:01:43,529 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T07:01:43,610 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:01:43,614 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T07:01:43,617 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T07:01:43,617 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T07:01:43,617 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T07:01:43,618 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:01:43,618 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a75d232{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/hadoop.log.dir/,AVAILABLE} 2024-11-11T07:01:43,619 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a7d1da5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T07:01:43,740 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3ece2bc8{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/java.io.tmpdir/jetty-localhost-39511-hadoop-hdfs-3_4_1-tests_jar-_-any-1040314148680268193/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T07:01:43,741 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@642a12de{HTTP/1.1, (http/1.1)}{localhost:39511} 2024-11-11T07:01:43,741 INFO [Time-limited test {}] server.Server(415): Started @187320ms 2024-11-11T07:01:43,756 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T07:01:43,836 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:01:43,840 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T07:01:43,841 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T07:01:43,841 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T07:01:43,841 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T07:01:43,841 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@113ad868{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/hadoop.log.dir/,AVAILABLE} 2024-11-11T07:01:43,842 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@214c2124{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T07:01:43,971 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5a3c93e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/java.io.tmpdir/jetty-localhost-43635-hadoop-hdfs-3_4_1-tests_jar-_-any-15488444139144399915/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:01:43,971 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@30c9629f{HTTP/1.1, (http/1.1)}{localhost:43635} 2024-11-11T07:01:43,971 INFO [Time-limited test {}] server.Server(415): Started @187550ms 2024-11-11T07:01:43,973 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T07:01:44,018 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:01:44,021 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T07:01:44,022 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T07:01:44,022 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T07:01:44,022 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T07:01:44,022 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2aaa4790{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/hadoop.log.dir/,AVAILABLE} 2024-11-11T07:01:44,023 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e470e04{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T07:01:44,084 WARN [Thread-1630 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/cluster_cd19cd6a-f412-51cb-7248-d21da25cae5e/data/data1/current/BP-119408927-172.17.0.2-1731308503549/current, will proceed with Du for space computation calculation, 2024-11-11T07:01:44,084 WARN [Thread-1631 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/cluster_cd19cd6a-f412-51cb-7248-d21da25cae5e/data/data2/current/BP-119408927-172.17.0.2-1731308503549/current, will proceed with Du for space computation calculation, 2024-11-11T07:01:44,102 WARN [Thread-1609 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T07:01:44,106 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5f929dc85fd80844 with lease ID 0xc1baf667655364aa: Processing first storage report for DS-b1df77e4-dd9e-4b5e-bf2a-b2932d74dcc8 from datanode DatanodeRegistration(127.0.0.1:43147, datanodeUuid=faec0a49-413e-42a7-85b3-9f9fc5718e38, infoPort=41937, infoSecurePort=0, ipcPort=36819, storageInfo=lv=-57;cid=testClusterID;nsid=1578197652;c=1731308503549) 2024-11-11T07:01:44,106 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5f929dc85fd80844 with lease ID 0xc1baf667655364aa: from storage DS-b1df77e4-dd9e-4b5e-bf2a-b2932d74dcc8 node DatanodeRegistration(127.0.0.1:43147, datanodeUuid=faec0a49-413e-42a7-85b3-9f9fc5718e38, infoPort=41937, infoSecurePort=0, ipcPort=36819, storageInfo=lv=-57;cid=testClusterID;nsid=1578197652;c=1731308503549), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:01:44,106 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5f929dc85fd80844 with lease ID 0xc1baf667655364aa: Processing first storage report for DS-50bf5a20-983b-46a3-86d8-e0714f3ddd7a from datanode DatanodeRegistration(127.0.0.1:43147, datanodeUuid=faec0a49-413e-42a7-85b3-9f9fc5718e38, infoPort=41937, infoSecurePort=0, ipcPort=36819, storageInfo=lv=-57;cid=testClusterID;nsid=1578197652;c=1731308503549) 2024-11-11T07:01:44,106 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5f929dc85fd80844 with lease ID 0xc1baf667655364aa: from storage DS-50bf5a20-983b-46a3-86d8-e0714f3ddd7a node DatanodeRegistration(127.0.0.1:43147, datanodeUuid=faec0a49-413e-42a7-85b3-9f9fc5718e38, infoPort=41937, infoSecurePort=0, ipcPort=36819, storageInfo=lv=-57;cid=testClusterID;nsid=1578197652;c=1731308503549), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:01:44,143 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ecaedd6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/java.io.tmpdir/jetty-localhost-38451-hadoop-hdfs-3_4_1-tests_jar-_-any-12570654078076602012/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:01:44,144 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@222377c1{HTTP/1.1, (http/1.1)}{localhost:38451} 2024-11-11T07:01:44,144 INFO [Time-limited test {}] server.Server(415): Started @187723ms 2024-11-11T07:01:44,145 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T07:01:44,147 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:44,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:44,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:44,255 WARN [Thread-1656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/cluster_cd19cd6a-f412-51cb-7248-d21da25cae5e/data/data3/current/BP-119408927-172.17.0.2-1731308503549/current, will proceed with Du for space computation calculation, 2024-11-11T07:01:44,255 WARN [Thread-1657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/cluster_cd19cd6a-f412-51cb-7248-d21da25cae5e/data/data4/current/BP-119408927-172.17.0.2-1731308503549/current, will proceed with Du for space computation calculation, 2024-11-11T07:01:44,273 WARN [Thread-1645 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T07:01:44,276 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9a9f5f26df4b9296 with lease ID 0xc1baf667655364ab: Processing first storage report for DS-f1ea24d2-2fb8-4bc4-9e1b-63512291a64c from datanode DatanodeRegistration(127.0.0.1:40977, datanodeUuid=d0054b2c-3178-4547-b3e9-1f04bc04847e, infoPort=46397, infoSecurePort=0, ipcPort=38669, storageInfo=lv=-57;cid=testClusterID;nsid=1578197652;c=1731308503549) 2024-11-11T07:01:44,276 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9a9f5f26df4b9296 with lease ID 0xc1baf667655364ab: from storage DS-f1ea24d2-2fb8-4bc4-9e1b-63512291a64c node DatanodeRegistration(127.0.0.1:40977, datanodeUuid=d0054b2c-3178-4547-b3e9-1f04bc04847e, infoPort=46397, infoSecurePort=0, ipcPort=38669, storageInfo=lv=-57;cid=testClusterID;nsid=1578197652;c=1731308503549), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:01:44,276 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9a9f5f26df4b9296 with lease ID 0xc1baf667655364ab: Processing first storage report for DS-802d3618-2ff4-4a14-8476-63e2ca73db9d from datanode DatanodeRegistration(127.0.0.1:40977, datanodeUuid=d0054b2c-3178-4547-b3e9-1f04bc04847e, infoPort=46397, infoSecurePort=0, ipcPort=38669, storageInfo=lv=-57;cid=testClusterID;nsid=1578197652;c=1731308503549) 2024-11-11T07:01:44,276 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9a9f5f26df4b9296 with lease ID 0xc1baf667655364ab: from storage DS-802d3618-2ff4-4a14-8476-63e2ca73db9d node DatanodeRegistration(127.0.0.1:40977, datanodeUuid=d0054b2c-3178-4547-b3e9-1f04bc04847e, infoPort=46397, infoSecurePort=0, ipcPort=38669, storageInfo=lv=-57;cid=testClusterID;nsid=1578197652;c=1731308503549), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:01:44,368 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207 2024-11-11T07:01:44,371 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/cluster_cd19cd6a-f412-51cb-7248-d21da25cae5e/zookeeper_0, clientPort=64017, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/cluster_cd19cd6a-f412-51cb-7248-d21da25cae5e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/cluster_cd19cd6a-f412-51cb-7248-d21da25cae5e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T07:01:44,372 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64017 2024-11-11T07:01:44,372 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:01:44,373 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:01:44,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741825_1001 (size=7) 2024-11-11T07:01:44,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741825_1001 (size=7) 2024-11-11T07:01:44,383 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17 with version=8 2024-11-11T07:01:44,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/hbase-staging 2024-11-11T07:01:44,384 INFO [Time-limited test {}] client.ConnectionUtils(128): master/f334e8b0fbdf:0 server-side Connection retries=45 2024-11-11T07:01:44,385 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:01:44,385 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T07:01:44,385 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T07:01:44,385 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:01:44,385 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T07:01:44,385 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T07:01:44,385 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T07:01:44,385 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35163 2024-11-11T07:01:44,387 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35163 connecting to ZooKeeper ensemble=127.0.0.1:64017 2024-11-11T07:01:44,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:351630x0, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T07:01:44,394 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35163-0x10105d52fc20000 connected 2024-11-11T07:01:44,408 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:01:44,409 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:01:44,411 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:01:44,411 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17, hbase.cluster.distributed=false 2024-11-11T07:01:44,412 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T07:01:44,413 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35163 2024-11-11T07:01:44,414 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35163 2024-11-11T07:01:44,414 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35163 2024-11-11T07:01:44,414 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35163 2024-11-11T07:01:44,415 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35163 2024-11-11T07:01:44,431 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f334e8b0fbdf:0 server-side Connection retries=45 2024-11-11T07:01:44,431 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:01:44,431 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T07:01:44,431 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T07:01:44,431 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:01:44,431 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T07:01:44,431 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T07:01:44,431 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T07:01:44,432 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39227 2024-11-11T07:01:44,433 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39227 connecting to ZooKeeper ensemble=127.0.0.1:64017 2024-11-11T07:01:44,434 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:01:44,435 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:01:44,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:392270x0, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T07:01:44,440 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39227-0x10105d52fc20001 connected 2024-11-11T07:01:44,440 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39227-0x10105d52fc20001, quorum=127.0.0.1:64017, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:01:44,441 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T07:01:44,441 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T07:01:44,442 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39227-0x10105d52fc20001, quorum=127.0.0.1:64017, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T07:01:44,442 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39227-0x10105d52fc20001, quorum=127.0.0.1:64017, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T07:01:44,443 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39227 2024-11-11T07:01:44,443 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39227 2024-11-11T07:01:44,443 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39227 2024-11-11T07:01:44,444 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39227 2024-11-11T07:01:44,444 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39227 
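The regionserver and master startup records above both connect to the ZooKeeper ensemble at 127.0.0.1:64017 and then set watchers on znodes such as /hbase/running and /hbase/master before those znodes exist. A minimal sketch of that pattern with the plain Apache ZooKeeper client follows (the log itself goes through HBase's internal ZKWatcher/ZKUtil wrappers, which are not shown here; the 30-second session timeout is an assumption, only the ensemble address and znode path are taken from the log):

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Connect to the mini ZooKeeper cluster started on clientPort=64017 in this run.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:64017", 30_000, (WatchedEvent e) -> {
          if (e.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown(); // corresponds to the "SyncConnected" events in the log
          }
        });
        connected.await();
        // exists() registers the watcher even when the znode is absent, which is what
        // "Set watcher on znode that does not yet exist, /hbase/running" reports above.
        zk.exists("/hbase/running",
            event -> System.out.println("event on /hbase/running: " + event.getType()));
        zk.close();
      }
    }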
2024-11-11T07:01:44,455 DEBUG [M:0;f334e8b0fbdf:35163 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;f334e8b0fbdf:35163 2024-11-11T07:01:44,455 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/f334e8b0fbdf,35163,1731308504384 2024-11-11T07:01:44,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39227-0x10105d52fc20001, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T07:01:44,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T07:01:44,457 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/f334e8b0fbdf,35163,1731308504384 2024-11-11T07:01:44,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39227-0x10105d52fc20001, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T07:01:44,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:44,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39227-0x10105d52fc20001, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:44,459 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T07:01:44,460 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/f334e8b0fbdf,35163,1731308504384 from backup master directory 2024-11-11T07:01:44,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/f334e8b0fbdf,35163,1731308504384 2024-11-11T07:01:44,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39227-0x10105d52fc20001, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T07:01:44,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T07:01:44,461 WARN [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
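HMaster(525) above reports hbase.rootdir=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17 and hbase.cluster.distributed=false, with the mini ZooKeeper cluster on client port 64017. A rough sketch of the corresponding client-side configuration, using standard HBase property keys; the values are simply the ones visible in this particular run and carry no meaning outside it:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TestRunConfSketch {
      public static void main(String[] args) {
        // Property keys are standard HBase configuration keys; values come from this log.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.rootdir",
            "hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17");
        conf.setBoolean("hbase.cluster.distributed", false); // single-process mini-cluster mode
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 64017);
        System.out.println(conf.get("hbase.rootdir"));
      }
    }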
2024-11-11T07:01:44,461 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=f334e8b0fbdf,35163,1731308504384 2024-11-11T07:01:44,466 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/hbase.id] with ID: 5e8404a1-ca1c-4e8c-8746-0419487eb079 2024-11-11T07:01:44,466 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/.tmp/hbase.id 2024-11-11T07:01:44,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741826_1002 (size=42) 2024-11-11T07:01:44,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741826_1002 (size=42) 2024-11-11T07:01:44,471 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/.tmp/hbase.id]:[hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/hbase.id] 2024-11-11T07:01:44,482 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:01:44,482 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T07:01:44,483 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
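The FSUtils lines above write the cluster ID to a .tmp location first and only then move it to hbase.id, so readers never observe a partially written file. A rough sketch of that create-then-rename pattern with the plain Hadoop FileSystem API (this is not the actual FSUtils implementation; the NameNode URI, path, and cluster ID string are copied from the log, and the file content is treated as plain text purely for illustration):

    import java.net.URI;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:33781"), conf);
        Path rootDir = new Path("/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17");
        Path tmp = new Path(rootDir, ".tmp/hbase.id");
        Path target = new Path(rootDir, "hbase.id");
        // 1. Write the cluster ID to a temporary file first...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("5e8404a1-ca1c-4e8c-8746-0419487eb079".getBytes(StandardCharsets.UTF_8));
        }
        // 2. ...then rename it into its final location, as FSUtils(634) logs above.
        if (!fs.rename(tmp, target)) {
          throw new java.io.IOException("rename failed: " + tmp + " -> " + target);
        }
        fs.close();
      }
    }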
2024-11-11T07:01:44,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:44,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39227-0x10105d52fc20001, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:44,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741827_1003 (size=196) 2024-11-11T07:01:44,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741827_1003 (size=196) 2024-11-11T07:01:44,493 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T07:01:44,493 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T07:01:44,493 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T07:01:44,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741828_1004 (size=1189) 2024-11-11T07:01:44,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741828_1004 (size=1189) 2024-11-11T07:01:44,502 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store 2024-11-11T07:01:44,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741829_1005 (size=34) 2024-11-11T07:01:44,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741829_1005 (size=34) 2024-11-11T07:01:44,510 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:01:44,511 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T07:01:44,511 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:01:44,511 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:01:44,511 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T07:01:44,511 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:01:44,511 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
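The master:store descriptor printed above declares its 'info' family with VERSIONS => '3', IN_MEMORY => 'true', BLOCKSIZE => '8192 B (8KB)', BLOOMFILTER => 'ROWCOL' and DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'. A sketch of how a column family with those attributes is expressed through the public HBase client builder API follows; this is only an illustration of the attribute mapping, not the internal MasterRegion code that actually creates the region:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreInfoFamilySketch {
      public static void main(String[] args) {
        // Mirrors the attributes printed for {NAME => 'info', ...} in the log above.
        ColumnFamilyDescriptor info =
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
                .build();
        System.out.println(info);
      }
    }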
2024-11-11T07:01:44,511 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731308504511Disabling compacts and flushes for region at 1731308504511Disabling writes for close at 1731308504511Writing region close event to WAL at 1731308504511Closed at 1731308504511 2024-11-11T07:01:44,512 WARN [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store/.initializing 2024-11-11T07:01:44,512 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/WALs/f334e8b0fbdf,35163,1731308504384 2024-11-11T07:01:44,514 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f334e8b0fbdf%2C35163%2C1731308504384, suffix=, logDir=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/WALs/f334e8b0fbdf,35163,1731308504384, archiveDir=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/oldWALs, maxLogs=10 2024-11-11T07:01:44,514 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C35163%2C1731308504384.1731308504514 2024-11-11T07:01:44,519 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/WALs/f334e8b0fbdf,35163,1731308504384/f334e8b0fbdf%2C35163%2C1731308504384.1731308504514 2024-11-11T07:01:44,520 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41937:41937),(127.0.0.1/127.0.0.1:46397:46397)] 2024-11-11T07:01:44,521 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T07:01:44,521 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:01:44,521 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:01:44,521 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:01:44,522 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:01:44,523 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T07:01:44,523 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:44,524 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:01:44,524 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:01:44,525 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T07:01:44,525 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:44,526 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T07:01:44,526 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:01:44,527 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T07:01:44,527 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:44,527 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T07:01:44,527 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:01:44,528 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T07:01:44,528 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:44,529 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T07:01:44,529 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:01:44,530 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:01:44,530 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:01:44,531 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:01:44,531 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:01:44,532 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T07:01:44,533 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:01:44,535 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T07:01:44,536 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=851789, jitterRate=0.08310693502426147}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T07:01:44,537 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731308504521Initializing all the Stores at 1731308504522 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308504522Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308504522Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308504522Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308504522Cleaning up temporary data from old regions at 1731308504531 (+9 ms)Region opened successfully at 1731308504537 (+6 ms) 2024-11-11T07:01:44,537 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T07:01:44,540 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c0e595c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f334e8b0fbdf/172.17.0.2:0 2024-11-11T07:01:44,541 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T07:01:44,541 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T07:01:44,541 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T07:01:44,541 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T07:01:44,542 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T07:01:44,542 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T07:01:44,542 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T07:01:44,544 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T07:01:44,545 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T07:01:44,546 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T07:01:44,547 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T07:01:44,547 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T07:01:44,548 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T07:01:44,549 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T07:01:44,549 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T07:01:44,551 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T07:01:44,551 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T07:01:44,554 DEBUG 
[master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T07:01:44,555 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T07:01:44,556 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T07:01:44,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39227-0x10105d52fc20001, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T07:01:44,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T07:01:44,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39227-0x10105d52fc20001, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:44,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:44,558 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=f334e8b0fbdf,35163,1731308504384, sessionid=0x10105d52fc20000, setting cluster-up flag (Was=false) 2024-11-11T07:01:44,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39227-0x10105d52fc20001, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:44,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:44,566 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T07:01:44,567 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f334e8b0fbdf,35163,1731308504384 2024-11-11T07:01:44,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39227-0x10105d52fc20001, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:44,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:44,576 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T07:01:44,577 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f334e8b0fbdf,35163,1731308504384 2024-11-11T07:01:44,578 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T07:01:44,579 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T07:01:44,580 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T07:01:44,580 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T07:01:44,580 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: f334e8b0fbdf,35163,1731308504384 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T07:01:44,581 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T07:01:44,581 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T07:01:44,581 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T07:01:44,581 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T07:01:44,581 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/f334e8b0fbdf:0, corePoolSize=10, maxPoolSize=10 2024-11-11T07:01:44,581 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:01:44,581 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=2, maxPoolSize=2 2024-11-11T07:01:44,581 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T07:01:44,582 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731308534582 2024-11-11T07:01:44,582 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T07:01:44,582 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T07:01:44,582 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T07:01:44,582 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T07:01:44,582 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T07:01:44,582 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T07:01:44,582 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:44,583 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T07:01:44,583 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T07:01:44,583 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T07:01:44,583 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T07:01:44,583 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T07:01:44,583 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T07:01:44,583 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T07:01:44,583 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.large.0-1731308504583,5,FailOnTimeoutGroup] 2024-11-11T07:01:44,583 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.small.0-1731308504583,5,FailOnTimeoutGroup] 2024-11-11T07:01:44,583 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:44,584 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T07:01:44,584 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:44,584 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:44,584 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:44,584 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T07:01:44,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741831_1007 (size=1321) 2024-11-11T07:01:44,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741831_1007 (size=1321) 2024-11-11T07:01:44,591 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T07:01:44,591 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17 2024-11-11T07:01:44,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741832_1008 (size=32) 2024-11-11T07:01:44,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741832_1008 (size=32) 2024-11-11T07:01:44,596 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:01:44,597 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T07:01:44,598 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T07:01:44,598 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:44,599 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:01:44,599 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T07:01:44,600 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T07:01:44,600 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:44,600 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:01:44,600 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T07:01:44,601 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T07:01:44,601 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:44,602 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:01:44,602 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T07:01:44,602 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T07:01:44,602 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:44,603 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:01:44,603 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T07:01:44,603 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/hbase/meta/1588230740 2024-11-11T07:01:44,604 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/hbase/meta/1588230740 2024-11-11T07:01:44,604 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T07:01:44,604 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T07:01:44,605 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
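[editor note] The FlushLargeStoresPolicy entry above reports that hbase.hregion.percolumnfamilyflush.size.lower.bound is unset and that the per-family flush bound falls back to memstore-flush-size divided by the number of families (16.0 M here); the earlier HMaster entry likewise notes that hbase.regions.recovery.store.file.ref.count must be > 0 to enable reopening regions with a very high storeFileRefCount. A minimal, hypothetical sketch of setting those two keys on a configuration before a cluster is started — the key names are taken from the log, the values are illustrative only, and the wiring into this particular test is omitted:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TuneFlushAndRecoveryConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Explicit per-column-family flush lower bound (16 MB) instead of the
    // memstore-flush-size / number-of-families fallback reported in the log.
    conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);
    // Any value > 0 enables reopening regions with very high storeFileRefCount.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
    // The configuration would then be handed to whatever starts the cluster.
    System.out.println(conf.get("hbase.regions.recovery.store.file.ref.count"));
  }
}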
2024-11-11T07:01:44,606 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T07:01:44,607 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T07:01:44,608 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=786815, jitterRate=4.8732757568359375E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T07:01:44,608 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731308504597Initializing all the Stores at 1731308504597Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308504597Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308504597Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308504597Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308504597Cleaning up temporary data from old regions at 1731308504604 (+7 ms)Region opened successfully at 1731308504608 (+4 ms) 2024-11-11T07:01:44,609 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T07:01:44,609 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T07:01:44,609 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T07:01:44,609 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T07:01:44,609 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T07:01:44,609 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T07:01:44,609 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731308504609Disabling compacts and flushes for region at 1731308504609Disabling writes for close at 1731308504609Writing region close event 
to WAL at 1731308504609Closed at 1731308504609 2024-11-11T07:01:44,610 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T07:01:44,610 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T07:01:44,610 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T07:01:44,612 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T07:01:44,613 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T07:01:44,645 INFO [RS:0;f334e8b0fbdf:39227 {}] regionserver.HRegionServer(746): ClusterId : 5e8404a1-ca1c-4e8c-8746-0419487eb079 2024-11-11T07:01:44,646 DEBUG [RS:0;f334e8b0fbdf:39227 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T07:01:44,648 DEBUG [RS:0;f334e8b0fbdf:39227 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T07:01:44,648 DEBUG [RS:0;f334e8b0fbdf:39227 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T07:01:44,649 DEBUG [RS:0;f334e8b0fbdf:39227 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T07:01:44,650 DEBUG [RS:0;f334e8b0fbdf:39227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8084645, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f334e8b0fbdf/172.17.0.2:0 2024-11-11T07:01:44,661 DEBUG [RS:0;f334e8b0fbdf:39227 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;f334e8b0fbdf:39227 2024-11-11T07:01:44,661 INFO [RS:0;f334e8b0fbdf:39227 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T07:01:44,662 INFO [RS:0;f334e8b0fbdf:39227 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T07:01:44,662 DEBUG [RS:0;f334e8b0fbdf:39227 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-11T07:01:44,662 INFO [RS:0;f334e8b0fbdf:39227 {}] regionserver.HRegionServer(2659): reportForDuty to master=f334e8b0fbdf,35163,1731308504384 with port=39227, startcode=1731308504431 2024-11-11T07:01:44,662 DEBUG [RS:0;f334e8b0fbdf:39227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T07:01:44,664 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55109, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T07:01:44,665 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35163 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f334e8b0fbdf,39227,1731308504431 2024-11-11T07:01:44,665 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35163 {}] master.ServerManager(517): Registering regionserver=f334e8b0fbdf,39227,1731308504431 2024-11-11T07:01:44,666 DEBUG [RS:0;f334e8b0fbdf:39227 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17 2024-11-11T07:01:44,666 DEBUG [RS:0;f334e8b0fbdf:39227 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33781 2024-11-11T07:01:44,666 DEBUG [RS:0;f334e8b0fbdf:39227 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T07:01:44,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T07:01:44,668 DEBUG [RS:0;f334e8b0fbdf:39227 {}] zookeeper.ZKUtil(111): regionserver:39227-0x10105d52fc20001, quorum=127.0.0.1:64017, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f334e8b0fbdf,39227,1731308504431 2024-11-11T07:01:44,668 WARN [RS:0;f334e8b0fbdf:39227 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T07:01:44,669 INFO [RS:0;f334e8b0fbdf:39227 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T07:01:44,669 DEBUG [RS:0;f334e8b0fbdf:39227 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/WALs/f334e8b0fbdf,39227,1731308504431 2024-11-11T07:01:44,669 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f334e8b0fbdf,39227,1731308504431] 2024-11-11T07:01:44,672 INFO [RS:0;f334e8b0fbdf:39227 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T07:01:44,674 INFO [RS:0;f334e8b0fbdf:39227 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T07:01:44,674 INFO [RS:0;f334e8b0fbdf:39227 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T07:01:44,674 INFO [RS:0;f334e8b0fbdf:39227 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-11T07:01:44,674 INFO [RS:0;f334e8b0fbdf:39227 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T07:01:44,675 INFO [RS:0;f334e8b0fbdf:39227 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T07:01:44,675 INFO [RS:0;f334e8b0fbdf:39227 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:44,675 DEBUG [RS:0;f334e8b0fbdf:39227 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:01:44,675 DEBUG [RS:0;f334e8b0fbdf:39227 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:01:44,675 DEBUG [RS:0;f334e8b0fbdf:39227 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:01:44,675 DEBUG [RS:0;f334e8b0fbdf:39227 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:01:44,675 DEBUG [RS:0;f334e8b0fbdf:39227 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:01:44,675 DEBUG [RS:0;f334e8b0fbdf:39227 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f334e8b0fbdf:0, corePoolSize=2, maxPoolSize=2 2024-11-11T07:01:44,675 DEBUG [RS:0;f334e8b0fbdf:39227 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:01:44,675 DEBUG [RS:0;f334e8b0fbdf:39227 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:01:44,675 DEBUG [RS:0;f334e8b0fbdf:39227 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:01:44,675 DEBUG [RS:0;f334e8b0fbdf:39227 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:01:44,675 DEBUG [RS:0;f334e8b0fbdf:39227 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:01:44,675 DEBUG [RS:0;f334e8b0fbdf:39227 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:01:44,675 DEBUG [RS:0;f334e8b0fbdf:39227 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f334e8b0fbdf:0, corePoolSize=3, maxPoolSize=3 2024-11-11T07:01:44,675 DEBUG [RS:0;f334e8b0fbdf:39227 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0, corePoolSize=3, maxPoolSize=3 2024-11-11T07:01:44,676 INFO [RS:0;f334e8b0fbdf:39227 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-11T07:01:44,676 INFO [RS:0;f334e8b0fbdf:39227 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:44,676 INFO [RS:0;f334e8b0fbdf:39227 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:44,676 INFO [RS:0;f334e8b0fbdf:39227 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:44,676 INFO [RS:0;f334e8b0fbdf:39227 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:44,676 INFO [RS:0;f334e8b0fbdf:39227 {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,39227,1731308504431-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T07:01:44,690 INFO [RS:0;f334e8b0fbdf:39227 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T07:01:44,691 INFO [RS:0;f334e8b0fbdf:39227 {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,39227,1731308504431-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:44,691 INFO [RS:0;f334e8b0fbdf:39227 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:44,691 INFO [RS:0;f334e8b0fbdf:39227 {}] regionserver.Replication(171): f334e8b0fbdf,39227,1731308504431 started 2024-11-11T07:01:44,704 INFO [RS:0;f334e8b0fbdf:39227 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:44,704 INFO [RS:0;f334e8b0fbdf:39227 {}] regionserver.HRegionServer(1482): Serving as f334e8b0fbdf,39227,1731308504431, RpcServer on f334e8b0fbdf/172.17.0.2:39227, sessionid=0x10105d52fc20001 2024-11-11T07:01:44,704 DEBUG [RS:0;f334e8b0fbdf:39227 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T07:01:44,704 DEBUG [RS:0;f334e8b0fbdf:39227 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f334e8b0fbdf,39227,1731308504431 2024-11-11T07:01:44,704 DEBUG [RS:0;f334e8b0fbdf:39227 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f334e8b0fbdf,39227,1731308504431' 2024-11-11T07:01:44,704 DEBUG [RS:0;f334e8b0fbdf:39227 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T07:01:44,705 DEBUG [RS:0;f334e8b0fbdf:39227 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T07:01:44,705 DEBUG [RS:0;f334e8b0fbdf:39227 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T07:01:44,705 DEBUG [RS:0;f334e8b0fbdf:39227 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T07:01:44,705 DEBUG [RS:0;f334e8b0fbdf:39227 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager f334e8b0fbdf,39227,1731308504431 2024-11-11T07:01:44,705 DEBUG [RS:0;f334e8b0fbdf:39227 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f334e8b0fbdf,39227,1731308504431' 2024-11-11T07:01:44,705 DEBUG [RS:0;f334e8b0fbdf:39227 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T07:01:44,705 DEBUG 
[RS:0;f334e8b0fbdf:39227 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T07:01:44,706 DEBUG [RS:0;f334e8b0fbdf:39227 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T07:01:44,706 INFO [RS:0;f334e8b0fbdf:39227 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T07:01:44,706 INFO [RS:0;f334e8b0fbdf:39227 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T07:01:44,763 WARN [f334e8b0fbdf:35163 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-11T07:01:44,808 INFO [RS:0;f334e8b0fbdf:39227 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f334e8b0fbdf%2C39227%2C1731308504431, suffix=, logDir=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/WALs/f334e8b0fbdf,39227,1731308504431, archiveDir=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/oldWALs, maxLogs=32 2024-11-11T07:01:44,808 INFO [RS:0;f334e8b0fbdf:39227 {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C39227%2C1731308504431.1731308504808 2024-11-11T07:01:44,816 INFO [RS:0;f334e8b0fbdf:39227 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/WALs/f334e8b0fbdf,39227,1731308504431/f334e8b0fbdf%2C39227%2C1731308504431.1731308504808 2024-11-11T07:01:44,817 DEBUG [RS:0;f334e8b0fbdf:39227 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41937:41937),(127.0.0.1/127.0.0.1:46397:46397)] 2024-11-11T07:01:45,013 DEBUG [f334e8b0fbdf:35163 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T07:01:45,014 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=f334e8b0fbdf,39227,1731308504431 2024-11-11T07:01:45,016 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f334e8b0fbdf,39227,1731308504431, state=OPENING 2024-11-11T07:01:45,018 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T07:01:45,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:45,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39227-0x10105d52fc20001, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:01:45,019 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T07:01:45,020 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=f334e8b0fbdf,39227,1731308504431}] 2024-11-11T07:01:45,020 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for 
path /hbase/meta-region-server: CHANGED 2024-11-11T07:01:45,020 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T07:01:45,147 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:45,173 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T07:01:45,175 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52211, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T07:01:45,178 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T07:01:45,178 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T07:01:45,180 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f334e8b0fbdf%2C39227%2C1731308504431.meta, suffix=.meta, logDir=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/WALs/f334e8b0fbdf,39227,1731308504431, archiveDir=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/oldWALs, maxLogs=32 2024-11-11T07:01:45,180 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C39227%2C1731308504431.meta.1731308505180.meta 2024-11-11T07:01:45,186 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/WALs/f334e8b0fbdf,39227,1731308504431/f334e8b0fbdf%2C39227%2C1731308504431.meta.1731308505180.meta 2024-11-11T07:01:45,187 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41937:41937),(127.0.0.1/127.0.0.1:46397:46397)] 2024-11-11T07:01:45,188 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T07:01:45,188 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T07:01:45,188 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T07:01:45,188 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
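[editor note] The entries above show hbase:meta being opened with the MultiRowMutationEndpoint coprocessor and the descriptor written earlier by FSTableDescriptors. As a hedged illustration (not part of this test run), the same descriptor could be read back from a client once the cluster is up, assuming the standard HBase 2/3 client API and an hbase-site.xml pointing at this cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class DumpMetaDescriptor {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
      // Prints the same attributes logged above: IS_META, the coprocessor entry,
      // and the info/ns/rep_barrier/table column families.
      System.out.println(meta);
      for (ColumnFamilyDescriptor cf : meta.getColumnFamilies()) {
        System.out.println(cf.getNameAsString() + " -> " + cf);
      }
    }
  }
}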
2024-11-11T07:01:45,188 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T07:01:45,188 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:01:45,188 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T07:01:45,188 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T07:01:45,190 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T07:01:45,190 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T07:01:45,190 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:45,191 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:01:45,191 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T07:01:45,192 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T07:01:45,192 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:45,192 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:01:45,192 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T07:01:45,193 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T07:01:45,193 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:45,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:01:45,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T07:01:45,194 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T07:01:45,194 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:45,194 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
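[editor note] Each of the four meta stores instantiated above uses the same family attributes (ROW_INDEX_V1 data block encoding, ROWCOL bloom filter, in-memory, no compression, 8 KB blocks for most families). For orientation only, a user table family with equivalent settings could be described with the standard builder API; this is a sketch, and the table name is hypothetical:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class BuildInfoLikeFamily {
  public static void main(String[] args) {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // as logged for 1588230740/info
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)                               // 8 KB, matching BLOCKSIZE above
        .setMaxVersions(3)
        .build();
    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example"))             // hypothetical table name
        .setColumnFamily(info)
        .build();
    System.out.println(table);
  }
}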
2024-11-11T07:01:45,194 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T07:01:45,195 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/hbase/meta/1588230740 2024-11-11T07:01:45,196 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/hbase/meta/1588230740 2024-11-11T07:01:45,197 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T07:01:45,197 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T07:01:45,198 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T07:01:45,199 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T07:01:45,200 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=765679, jitterRate=-0.026389166712760925}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T07:01:45,200 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T07:01:45,200 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731308505189Writing region info on filesystem at 1731308505189Initializing all the Stores at 1731308505189Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308505189Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308505189Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308505189Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308505189Cleaning up temporary data from old regions at 1731308505197 (+8 ms)Running coprocessor post-open hooks at 1731308505200 (+3 ms)Region opened successfully at 1731308505200 2024-11-11T07:01:45,201 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731308505173 2024-11-11T07:01:45,204 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T07:01:45,204 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T07:01:45,204 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=f334e8b0fbdf,39227,1731308504431 2024-11-11T07:01:45,205 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f334e8b0fbdf,39227,1731308504431, state=OPEN 2024-11-11T07:01:45,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T07:01:45,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39227-0x10105d52fc20001, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T07:01:45,210 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=f334e8b0fbdf,39227,1731308504431 2024-11-11T07:01:45,210 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T07:01:45,210 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T07:01:45,213 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T07:01:45,213 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=f334e8b0fbdf,39227,1731308504431 in 190 msec 2024-11-11T07:01:45,215 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T07:01:45,215 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 603 msec 2024-11-11T07:01:45,216 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T07:01:45,216 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T07:01:45,218 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T07:01:45,218 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f334e8b0fbdf,39227,1731308504431, seqNum=-1] 2024-11-11T07:01:45,218 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T07:01:45,220 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34357, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T07:01:45,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:45,225 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 646 msec 2024-11-11T07:01:45,226 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731308505225, completionTime=-1 2024-11-11T07:01:45,226 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T07:01:45,226 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-11T07:01:45,228 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-11T07:01:45,228 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731308565228 2024-11-11T07:01:45,228 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731308625228 2024-11-11T07:01:45,228 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-11T07:01:45,228 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,35163,1731308504384-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:45,229 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,35163,1731308504384-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:45,229 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,35163,1731308504384-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:45,229 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-f334e8b0fbdf:35163, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:45,229 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:45,229 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:45,230 DEBUG [master/f334e8b0fbdf:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T07:01:45,232 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.771sec 2024-11-11T07:01:45,232 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T07:01:45,232 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 
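[editor note] At this point InitMetaProcedure has finished, the master reports initialization complete, and the test client fetches the hbase:meta location from the connection registry (it resolves to f334e8b0fbdf,39227,1731308504431 in the surrounding entries). A hedged client-side equivalent of that lookup, assuming the ZooKeeper quorum 127.0.0.1:64017 from the log is reachable (the port is an ephemeral test port and will differ per run):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class LocateMetaRegion {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");          // quorum host from the log
    conf.set("hbase.zookeeper.property.clientPort", "64017"); // test-run port, illustrative
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Same lookup the workers log as "The fetched meta region location is [...]".
      HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
      System.out.println(loc.getServerName() + " hosts " + loc.getRegion().getRegionNameAsString());
    }
  }
}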
2024-11-11T07:01:45,232 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T07:01:45,232 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T07:01:45,232 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T07:01:45,233 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,35163,1731308504384-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T07:01:45,233 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,35163,1731308504384-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T07:01:45,235 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T07:01:45,235 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T07:01:45,235 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,35163,1731308504384-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:01:45,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:45,245 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c2ac8ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T07:01:45,246 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request f334e8b0fbdf,35163,-1 for getting cluster id 2024-11-11T07:01:45,246 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T07:01:45,247 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5e8404a1-ca1c-4e8c-8746-0419487eb079' 2024-11-11T07:01:45,248 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T07:01:45,248 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5e8404a1-ca1c-4e8c-8746-0419487eb079" 2024-11-11T07:01:45,248 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e5d3593, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T07:01:45,248 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f334e8b0fbdf,35163,-1] 2024-11-11T07:01:45,249 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T07:01:45,249 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:01:45,250 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33116, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T07:01:45,251 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e2a30ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T07:01:45,251 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T07:01:45,252 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f334e8b0fbdf,39227,1731308504431, seqNum=-1] 2024-11-11T07:01:45,252 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T07:01:45,253 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49856, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T07:01:45,255 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=f334e8b0fbdf,35163,1731308504384 2024-11-11T07:01:45,255 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:01:45,257 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T07:01:45,258 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T07:01:45,259 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is f334e8b0fbdf,35163,1731308504384 2024-11-11T07:01:45,259 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7ef7c635 2024-11-11T07:01:45,259 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T07:01:45,260 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33130, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T07:01:45,260 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-11T07:01:45,260 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-11T07:01:45,261 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T07:01:45,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T07:01:45,263 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T07:01:45,263 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:45,263 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-11T07:01:45,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T07:01:45,264 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T07:01:45,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741835_1011 (size=405) 2024-11-11T07:01:45,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741835_1011 (size=405) 2024-11-11T07:01:45,272 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 648e76b98bee8424b6ed037033a5d7c0, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17 2024-11-11T07:01:45,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741836_1012 (size=88) 2024-11-11T07:01:45,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43147 is added to blk_1073741836_1012 (size=88) 2024-11-11T07:01:45,280 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:01:45,281 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 648e76b98bee8424b6ed037033a5d7c0, disabling compactions & flushes 2024-11-11T07:01:45,281 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0. 2024-11-11T07:01:45,281 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0. 2024-11-11T07:01:45,281 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0. after waiting 0 ms 2024-11-11T07:01:45,281 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0. 2024-11-11T07:01:45,281 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0. 2024-11-11T07:01:45,281 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 648e76b98bee8424b6ed037033a5d7c0: Waiting for close lock at 1731308505281Disabling compacts and flushes for region at 1731308505281Disabling writes for close at 1731308505281Writing region close event to WAL at 1731308505281Closed at 1731308505281 2024-11-11T07:01:45,282 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T07:01:45,283 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731308505282"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731308505282"}]},"ts":"1731308505282"} 2024-11-11T07:01:45,285 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-11T07:01:45,286 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T07:01:45,287 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731308505287"}]},"ts":"1731308505287"} 2024-11-11T07:01:45,289 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-11T07:01:45,290 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=648e76b98bee8424b6ed037033a5d7c0, ASSIGN}] 2024-11-11T07:01:45,291 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=648e76b98bee8424b6ed037033a5d7c0, ASSIGN 2024-11-11T07:01:45,292 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=648e76b98bee8424b6ed037033a5d7c0, ASSIGN; state=OFFLINE, location=f334e8b0fbdf,39227,1731308504431; forceNewPlan=false, retain=false 2024-11-11T07:01:45,443 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=648e76b98bee8424b6ed037033a5d7c0, regionState=OPENING, regionLocation=f334e8b0fbdf,39227,1731308504431 2024-11-11T07:01:45,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=648e76b98bee8424b6ed037033a5d7c0, ASSIGN because future has completed 2024-11-11T07:01:45,447 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 648e76b98bee8424b6ed037033a5d7c0, server=f334e8b0fbdf,39227,1731308504431}] 2024-11-11T07:01:45,603 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0. 
2024-11-11T07:01:45,603 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 648e76b98bee8424b6ed037033a5d7c0, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0.', STARTKEY => '', ENDKEY => ''} 2024-11-11T07:01:45,604 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 648e76b98bee8424b6ed037033a5d7c0 2024-11-11T07:01:45,604 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:01:45,604 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 648e76b98bee8424b6ed037033a5d7c0 2024-11-11T07:01:45,604 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 648e76b98bee8424b6ed037033a5d7c0 2024-11-11T07:01:45,605 INFO [StoreOpener-648e76b98bee8424b6ed037033a5d7c0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 648e76b98bee8424b6ed037033a5d7c0 2024-11-11T07:01:45,606 INFO [StoreOpener-648e76b98bee8424b6ed037033a5d7c0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 648e76b98bee8424b6ed037033a5d7c0 columnFamilyName info 2024-11-11T07:01:45,606 DEBUG [StoreOpener-648e76b98bee8424b6ed037033a5d7c0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:01:45,607 INFO [StoreOpener-648e76b98bee8424b6ed037033a5d7c0-1 {}] regionserver.HStore(327): Store=648e76b98bee8424b6ed037033a5d7c0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T07:01:45,607 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 648e76b98bee8424b6ed037033a5d7c0 2024-11-11T07:01:45,608 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0 2024-11-11T07:01:45,608 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0 2024-11-11T07:01:45,608 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 648e76b98bee8424b6ed037033a5d7c0 2024-11-11T07:01:45,608 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 648e76b98bee8424b6ed037033a5d7c0 2024-11-11T07:01:45,610 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 648e76b98bee8424b6ed037033a5d7c0 2024-11-11T07:01:45,611 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T07:01:45,612 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 648e76b98bee8424b6ed037033a5d7c0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=750575, jitterRate=-0.04559536278247833}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T07:01:45,612 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 648e76b98bee8424b6ed037033a5d7c0 2024-11-11T07:01:45,612 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 648e76b98bee8424b6ed037033a5d7c0: Running coprocessor pre-open hook at 1731308505604Writing region info on filesystem at 1731308505604Initializing all the Stores at 1731308505605 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308505605Cleaning up temporary data from old regions at 1731308505608 (+3 ms)Running coprocessor post-open hooks at 1731308505612 (+4 ms)Region opened successfully at 1731308505612 2024-11-11T07:01:45,614 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0., pid=6, masterSystemTime=1731308505599 2024-11-11T07:01:45,616 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0. 2024-11-11T07:01:45,616 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0. 2024-11-11T07:01:45,617 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=648e76b98bee8424b6ed037033a5d7c0, regionState=OPEN, openSeqNum=2, regionLocation=f334e8b0fbdf,39227,1731308504431 2024-11-11T07:01:45,619 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 648e76b98bee8424b6ed037033a5d7c0, server=f334e8b0fbdf,39227,1731308504431 because future has completed 2024-11-11T07:01:45,623 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T07:01:45,623 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 648e76b98bee8424b6ed037033a5d7c0, server=f334e8b0fbdf,39227,1731308504431 in 173 msec 2024-11-11T07:01:45,626 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T07:01:45,626 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=648e76b98bee8424b6ed037033a5d7c0, ASSIGN in 333 msec 2024-11-11T07:01:45,627 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T07:01:45,627 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731308505627"}]},"ts":"1731308505627"} 2024-11-11T07:01:45,630 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-11T07:01:45,631 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T07:01:45,633 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 370 msec 2024-11-11T07:01:46,148 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:46,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:46,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:47,108 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T07:01:47,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:47,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:47,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:47,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:47,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:47,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:47,125 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:47,125 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:47,125 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:47,125 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:47,126 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:47,126 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:47,128 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:47,129 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:47,129 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:47,131 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:01:47,148 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:47,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:47,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:48,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:48,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:48,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:49,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:49,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:49,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:50,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:50,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:50,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:50,672 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-11T07:01:50,673 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-11T07:01:51,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:51,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:51,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:51,605 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-11T07:01:51,605 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-11T07:01:51,606 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T07:01:51,606 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-11T07:01:51,606 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-11T07:01:51,606 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-11T07:01:51,607 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T07:01:51,607 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-11T07:01:52,151 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:52,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:52,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:53,151 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:53,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:53,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:54,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:54,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:54,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:55,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:55,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:55,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:55,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T07:01:55,356 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-11T07:01:55,356 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-11T07:01:55,359 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T07:01:55,359 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0. 
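The WARN entries repeated above come from the WAL close path: RecoverLeaseFSUtils asks the NameNode to recover the lease on the old writer's file and then polls isFileClosed roughly once per second. The poll is made through reflection, so once the test's DFSClient has been shut down the underlying java.io.IOException: Filesystem closed surfaces as the InvocationTargetException logged at RecoverLeaseFSUtils(258). Below is a minimal, non-reflective sketch of that probe loop; the class name, NameNode URI and WAL path are illustrative placeholders, not code taken from HBase itself.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoveryProbe {

  // Ask HDFS to recover the lease on a WAL file, then poll until the file is
  // reported closed. This calls isFileClosed directly; RecoverLeaseFSUtils
  // reaches the same method via reflection, which is why a closed DFSClient
  // shows up in the log as InvocationTargetException wrapping "Filesystem closed".
  public static boolean recoverAndWait(DistributedFileSystem dfs, Path wal,
      long timeoutMs) throws IOException, InterruptedException {
    boolean recovered = dfs.recoverLease(wal);
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!recovered && System.currentTimeMillis() < deadline) {
      try {
        // Throws IOException("Filesystem closed") once the client is shut down.
        recovered = dfs.isFileClosed(wal);
      } catch (IOException e) {
        // Mirrors the WARN in the log: the probe failed, the retry loop keeps going.
        System.err.println("Failed invocation for " + wal + ": " + e.getMessage());
      }
      if (!recovered) {
        Thread.sleep(1000); // the log shows roughly one attempt per second
      }
    }
    return recovered;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical NameNode address and WAL path, standing in for the
    // hdfs://localhost:41533/... paths in the log above.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:41533"), conf);
    if (fs instanceof DistributedFileSystem) {
      Path wal = new Path("/user/jenkins/test-data/example-wal");
      System.out.println("closed=" + recoverAndWait((DistributedFileSystem) fs, wal, 60_000));
    }
    fs.close();
  }
}

Once the filesystem is closed, every retry fails the same way, which is why the identical stack trace recurs for each of the three WAL files at roughly one-second intervals.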
2024-11-11T07:01:55,362 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0., hostname=f334e8b0fbdf,39227,1731308504431, seqNum=2] 2024-11-11T07:01:55,368 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T07:01:55,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T07:01:55,374 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-11T07:01:55,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-11T07:01:55,375 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T07:01:55,377 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T07:01:55,537 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39227 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-11T07:01:55,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0. 
2024-11-11T07:01:55,538 INFO [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 648e76b98bee8424b6ed037033a5d7c0 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-11T07:01:55,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/.tmp/info/baae6b2728d94f328f2454fab656116d is 1080, key is row0001/info:/1731308515363/Put/seqid=0 2024-11-11T07:01:55,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741837_1013 (size=6033) 2024-11-11T07:01:55,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741837_1013 (size=6033) 2024-11-11T07:01:55,560 INFO [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/.tmp/info/baae6b2728d94f328f2454fab656116d 2024-11-11T07:01:55,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/.tmp/info/baae6b2728d94f328f2454fab656116d as hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/info/baae6b2728d94f328f2454fab656116d 2024-11-11T07:01:55,573 INFO [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/info/baae6b2728d94f328f2454fab656116d, entries=1, sequenceid=5, filesize=5.9 K 2024-11-11T07:01:55,574 INFO [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 648e76b98bee8424b6ed037033a5d7c0 in 37ms, sequenceid=5, compaction requested=false 2024-11-11T07:01:55,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 648e76b98bee8424b6ed037033a5d7c0: 2024-11-11T07:01:55,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0. 
2024-11-11T07:01:55,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-11T07:01:55,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-11T07:01:55,581 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-11T07:01:55,581 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 202 msec 2024-11-11T07:01:55,584 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 212 msec 2024-11-11T07:01:56,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:56,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:56,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:57,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:57,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:57,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:58,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:58,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:58,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:59,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:01:59,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:01:59,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:00,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:02:00,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:00,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:01,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:02:01,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:01,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:02,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:02:02,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:02,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:03,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:02:03,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:03,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:04,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:02:04,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:04,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:05,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:02:05,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:05,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-11T07:02:05,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-11T07:02:05,456 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-11T07:02:05,459 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-11T07:02:05,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-11T07:02:05,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-11T07:02:05,461 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-11T07:02:05,462 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-11T07:02:05,462 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-11T07:02:05,615 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39227 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-11T07:02:05,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0.
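The FLUSH operation above (RawAsyncHBaseAdmin reporting pid=7 complete, the master then storing a new FlushTableProcedure as pid=9 and fanning out a FlushRegionProcedure as pid=10) is the kind of table flush a test or client typically triggers through the HBase Admin API. A minimal sketch, assuming an hbase-site.xml on the classpath that points at the test cluster; the class name is illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    // Assumed: the Configuration resolves to the mini-cluster used by the test.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flush all regions of the table; the master runs a flush procedure and
      // the client waits for it, which matches the "Checking to see if
      // procedure is done" lines recorded above.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}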
2024-11-11T07:02:05,616 INFO [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 648e76b98bee8424b6ed037033a5d7c0 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-11T07:02:05,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/.tmp/info/ec41434bd5074309b84570fdb76c5d34 is 1080, key is row0002/info:/1731308525457/Put/seqid=0
2024-11-11T07:02:05,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741838_1014 (size=6033)
2024-11-11T07:02:05,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741838_1014 (size=6033)
2024-11-11T07:02:05,627 INFO [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/.tmp/info/ec41434bd5074309b84570fdb76c5d34
2024-11-11T07:02:05,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/.tmp/info/ec41434bd5074309b84570fdb76c5d34 as hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/info/ec41434bd5074309b84570fdb76c5d34
2024-11-11T07:02:05,639 INFO [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/info/ec41434bd5074309b84570fdb76c5d34, entries=1, sequenceid=9, filesize=5.9 K
2024-11-11T07:02:05,640 INFO [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 648e76b98bee8424b6ed037033a5d7c0 in 24ms, sequenceid=9, compaction requested=false
2024-11-11T07:02:05,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 648e76b98bee8424b6ed037033a5d7c0:
2024-11-11T07:02:05,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0.
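The flush above writes the memstore out under the store's .tmp directory and only then commits the file into the store's info directory (the HRegionFileSystem "Committing ... as ..." line), so readers never see a half-written HFile. A small sketch of that same temp-file-then-rename pattern against the generic Hadoop FileSystem API, not HBase's internal classes; paths, the class name, and the writeAndCommit helper are illustrative assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommitSketch {
  // Write a file under a .tmp directory first, then move it into its final
  // location; the rename is the commit point.
  public static Path writeAndCommit(FileSystem fs, Path storeDir, String fileName, byte[] payload)
      throws java.io.IOException {
    Path tmpFile = new Path(new Path(storeDir, ".tmp"), fileName);
    Path finalFile = new Path(new Path(storeDir, "info"), fileName);
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write(payload);
    }
    fs.mkdirs(finalFile.getParent());
    if (!fs.rename(tmpFile, finalFile)) {
      throw new java.io.IOException("Failed to commit " + tmpFile + " as " + finalFile);
    }
    return finalFile;
  }

  public static void main(String[] args) throws Exception {
    // Assumed: fs.defaultFS in the configuration points at the test HDFS instance.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    writeAndCommit(fs, new Path("/tmp/sketch-store"), "example-file", "demo".getBytes("UTF-8"));
  }
}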
2024-11-11T07:02:05,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-11-11T07:02:05,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-11-11T07:02:05,645 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-11-11T07:02:05,645 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 180 msec
2024-11-11T07:02:05,647 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 187 msec
2024-11-11T07:02:06,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-11T07:02:06,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:06,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 after 68045ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor192.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:02:06,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:02:06,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta after 68041ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor192.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T07:02:07,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:07,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:02:07,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:08,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:08,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:02:08,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:09,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:09,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:02:09,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:10,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:10,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:02:10,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:11,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:11,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:02:11,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:12,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:12,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:02:12,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:13,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:13,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:02:13,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:14,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:14,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:02:14,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:14,368 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-11T07:02:15,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:15,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:15,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-11T07:02:15,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-11T07:02:15,476 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-11T07:02:15,478 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C39227%2C1731308504431.1731308535478
2024-11-11T07:02:15,485 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T07:02:15,485 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T07:02:15,485 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T07:02:15,485 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T07:02:15,485 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T07:02:15,485 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/WALs/f334e8b0fbdf,39227,1731308504431/f334e8b0fbdf%2C39227%2C1731308504431.1731308504808 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/WALs/f334e8b0fbdf,39227,1731308504431/f334e8b0fbdf%2C39227%2C1731308504431.1731308535478
2024-11-11T07:02:15,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741833_1009 (size=5546)
2024-11-11T07:02:15,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741833_1009 (size=5546)
2024-11-11T07:02:15,491 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41937:41937),(127.0.0.1/127.0.0.1:46397:46397)]
2024-11-11T07:02:15,492 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-11T07:02:15,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-11T07:02:15,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-11T07:02:15,494 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-11T07:02:15,495 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-11T07:02:15,495 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-11T07:02:15,648 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39227 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-11T07:02:15,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0.
2024-11-11T07:02:15,649 INFO [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 648e76b98bee8424b6ed037033a5d7c0 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-11T07:02:15,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/.tmp/info/607bd85b6d3c4f20a87d1e53116756a3 is 1080, key is row0003/info:/1731308535477/Put/seqid=0
2024-11-11T07:02:15,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741840_1016 (size=6033)
2024-11-11T07:02:15,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741840_1016 (size=6033)
2024-11-11T07:02:15,659 INFO [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/.tmp/info/607bd85b6d3c4f20a87d1e53116756a3
2024-11-11T07:02:15,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/.tmp/info/607bd85b6d3c4f20a87d1e53116756a3 as hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/info/607bd85b6d3c4f20a87d1e53116756a3
2024-11-11T07:02:15,671 INFO [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/info/607bd85b6d3c4f20a87d1e53116756a3, entries=1, sequenceid=13, filesize=5.9 K
2024-11-11T07:02:15,673 INFO [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 648e76b98bee8424b6ed037033a5d7c0 in 23ms, sequenceid=13, compaction requested=true
2024-11-11T07:02:15,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 648e76b98bee8424b6ed037033a5d7c0:
2024-11-11T07:02:15,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0.
2024-11-11T07:02:15,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-11T07:02:15,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-11T07:02:15,677 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-11T07:02:15,677 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 180 msec
2024-11-11T07:02:15,679 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 186 msec
2024-11-11T07:02:16,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:16,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:16,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:17,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:17,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:17,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:18,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:18,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:18,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:19,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:19,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:19,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:20,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:20,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:20,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:21,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:21,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:21,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:22,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:22,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:22,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:23,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:23,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:23,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:24,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:24,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:24,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:25,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:25,237 INFO [master/f334e8b0fbdf:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-11T07:02:25,237 INFO [master/f334e8b0fbdf:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-11T07:02:25,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:25,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:25,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-11T07:02:25,505 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-11T07:02:25,506 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T07:02:25,507 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T07:02:25,507 DEBUG [Time-limited test {}] regionserver.HStore(1541): 648e76b98bee8424b6ed037033a5d7c0/info is initiating minor compaction (all files) 2024-11-11T07:02:25,507 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T07:02:25,507 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T07:02:25,507 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 648e76b98bee8424b6ed037033a5d7c0/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0. 
2024-11-11T07:02:25,507 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/info/baae6b2728d94f328f2454fab656116d, hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/info/ec41434bd5074309b84570fdb76c5d34, hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/info/607bd85b6d3c4f20a87d1e53116756a3] into tmpdir=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/.tmp, totalSize=17.7 K 2024-11-11T07:02:25,508 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting baae6b2728d94f328f2454fab656116d, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731308515363 2024-11-11T07:02:25,508 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting ec41434bd5074309b84570fdb76c5d34, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731308525457 2024-11-11T07:02:25,508 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 607bd85b6d3c4f20a87d1e53116756a3, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731308535477 2024-11-11T07:02:25,519 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 648e76b98bee8424b6ed037033a5d7c0#info#compaction#43 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T07:02:25,520 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/.tmp/info/92ce943d32cc477890f3c923e5d4322a is 1080, key is row0001/info:/1731308515363/Put/seqid=0 2024-11-11T07:02:25,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741841_1017 (size=8296) 2024-11-11T07:02:25,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741841_1017 (size=8296) 2024-11-11T07:02:25,531 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/.tmp/info/92ce943d32cc477890f3c923e5d4322a as hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/info/92ce943d32cc477890f3c923e5d4322a 2024-11-11T07:02:25,537 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 648e76b98bee8424b6ed037033a5d7c0/info of 648e76b98bee8424b6ed037033a5d7c0 into 92ce943d32cc477890f3c923e5d4322a(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T07:02:25,537 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 648e76b98bee8424b6ed037033a5d7c0: 2024-11-11T07:02:25,540 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C39227%2C1731308504431.1731308545540 2024-11-11T07:02:25,546 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:25,546 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:25,547 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:25,547 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:25,547 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:25,547 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/WALs/f334e8b0fbdf,39227,1731308504431/f334e8b0fbdf%2C39227%2C1731308504431.1731308535478 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/WALs/f334e8b0fbdf,39227,1731308504431/f334e8b0fbdf%2C39227%2C1731308504431.1731308545540 2024-11-11T07:02:25,548 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41937:41937),(127.0.0.1/127.0.0.1:46397:46397)] 2024-11-11T07:02:25,548 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/WALs/f334e8b0fbdf,39227,1731308504431/f334e8b0fbdf%2C39227%2C1731308504431.1731308535478 is not closed yet, will try archiving it next time 2024-11-11T07:02:25,548 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/WALs/f334e8b0fbdf,39227,1731308504431/f334e8b0fbdf%2C39227%2C1731308504431.1731308504808 to hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/oldWALs/f334e8b0fbdf%2C39227%2C1731308504431.1731308504808 2024-11-11T07:02:25,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741839_1015 (size=2520) 2024-11-11T07:02:25,549 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T07:02:25,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T07:02:25,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741839_1015 (size=2520) 2024-11-11T07:02:25,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-11T07:02:25,552 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-11T07:02:25,553 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T07:02:25,553 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T07:02:25,706 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39227 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-11T07:02:25,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0. 
2024-11-11T07:02:25,706 INFO [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 648e76b98bee8424b6ed037033a5d7c0 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-11T07:02:25,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/.tmp/info/2edb454cb95f48cba8de71542a27d07f is 1080, key is row0000/info:/1731308545538/Put/seqid=0 2024-11-11T07:02:25,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741843_1019 (size=6033) 2024-11-11T07:02:25,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741843_1019 (size=6033) 2024-11-11T07:02:25,717 INFO [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/.tmp/info/2edb454cb95f48cba8de71542a27d07f 2024-11-11T07:02:25,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/.tmp/info/2edb454cb95f48cba8de71542a27d07f as hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/info/2edb454cb95f48cba8de71542a27d07f 2024-11-11T07:02:25,731 INFO [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/info/2edb454cb95f48cba8de71542a27d07f, entries=1, sequenceid=18, filesize=5.9 K 2024-11-11T07:02:25,732 INFO [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 648e76b98bee8424b6ed037033a5d7c0 in 26ms, sequenceid=18, compaction requested=false 2024-11-11T07:02:25,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 648e76b98bee8424b6ed037033a5d7c0: 2024-11-11T07:02:25,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0. 
2024-11-11T07:02:25,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-11T07:02:25,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-11T07:02:25,737 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-11T07:02:25,737 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 181 msec 2024-11-11T07:02:25,740 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 189 msec 2024-11-11T07:02:26,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-11-11T07:02:26,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530
2024-11-11T07:02:26,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta
2024-11-11T07:02:27,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824
2024-11-11T07:02:27,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530
2024-11-11T07:02:27,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta
2024-11-11T07:02:28,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824
2024-11-11T07:02:28,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530
2024-11-11T07:02:28,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta
2024-11-11T07:02:29,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824
2024-11-11T07:02:29,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530
2024-11-11T07:02:29,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta
2024-11-11T07:02:30,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824
2024-11-11T07:02:30,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530
2024-11-11T07:02:30,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta
2024-11-11T07:02:30,604 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 648e76b98bee8424b6ed037033a5d7c0, had cached 0 bytes from a total of 14329
2024-11-11T07:02:31,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824
2024-11-11T07:02:31,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530
2024-11-11T07:02:31,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta
2024-11-11T07:02:32,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824
2024-11-11T07:02:32,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530
2024-11-11T07:02:32,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta
2024-11-11T07:02:33,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824
2024-11-11T07:02:33,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530
2024-11-11T07:02:33,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta
2024-11-11T07:02:34,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824
2024-11-11T07:02:34,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530
2024-11-11T07:02:34,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta
2024-11-11T07:02:35,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824
2024-11-11T07:02:35,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530
2024-11-11T07:02:35,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:35,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35163 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-11T07:02:35,645 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-11T07:02:35,648 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C39227%2C1731308504431.1731308555648 2024-11-11T07:02:35,654 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:35,654 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:35,654 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:35,654 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:35,654 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:35,655 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/WALs/f334e8b0fbdf,39227,1731308504431/f334e8b0fbdf%2C39227%2C1731308504431.1731308545540 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/WALs/f334e8b0fbdf,39227,1731308504431/f334e8b0fbdf%2C39227%2C1731308504431.1731308555648 2024-11-11T07:02:35,655 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46397:46397),(127.0.0.1/127.0.0.1:41937:41937)] 2024-11-11T07:02:35,655 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/WALs/f334e8b0fbdf,39227,1731308504431/f334e8b0fbdf%2C39227%2C1731308504431.1731308545540 is not closed yet, will 
try archiving it next time 2024-11-11T07:02:35,655 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/WALs/f334e8b0fbdf,39227,1731308504431/f334e8b0fbdf%2C39227%2C1731308504431.1731308535478 to hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/oldWALs/f334e8b0fbdf%2C39227%2C1731308504431.1731308535478 2024-11-11T07:02:35,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T07:02:35,656 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T07:02:35,656 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T07:02:35,656 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:02:35,656 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:02:35,656 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-11T07:02:35,656 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T07:02:35,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741842_1018 (size=2026) 2024-11-11T07:02:35,656 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1298482849, stopped=false 2024-11-11T07:02:35,656 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=f334e8b0fbdf,35163,1731308504384 2024-11-11T07:02:35,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741842_1018 (size=2026) 2024-11-11T07:02:35,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T07:02:35,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39227-0x10105d52fc20001, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T07:02:35,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:02:35,658 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T07:02:35,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39227-0x10105d52fc20001, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:02:35,658 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-11T07:02:35,658 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T07:02:35,658 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:02:35,658 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'f334e8b0fbdf,39227,1731308504431' ***** 2024-11-11T07:02:35,658 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T07:02:35,659 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39227-0x10105d52fc20001, quorum=127.0.0.1:64017, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:02:35,659 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:02:35,660 INFO [RS:0;f334e8b0fbdf:39227 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T07:02:35,660 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T07:02:35,660 INFO [RS:0;f334e8b0fbdf:39227 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T07:02:35,660 INFO [RS:0;f334e8b0fbdf:39227 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T07:02:35,660 INFO [RS:0;f334e8b0fbdf:39227 {}] regionserver.HRegionServer(3091): Received CLOSE for 648e76b98bee8424b6ed037033a5d7c0 2024-11-11T07:02:35,661 INFO [RS:0;f334e8b0fbdf:39227 {}] regionserver.HRegionServer(959): stopping server f334e8b0fbdf,39227,1731308504431 2024-11-11T07:02:35,661 INFO [RS:0;f334e8b0fbdf:39227 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T07:02:35,661 INFO [RS:0;f334e8b0fbdf:39227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;f334e8b0fbdf:39227. 2024-11-11T07:02:35,661 DEBUG [RS:0;f334e8b0fbdf:39227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T07:02:35,661 DEBUG [RS:0;f334e8b0fbdf:39227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:02:35,661 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 648e76b98bee8424b6ed037033a5d7c0, disabling compactions & flushes 2024-11-11T07:02:35,661 INFO 
[RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0. 2024-11-11T07:02:35,661 INFO [RS:0;f334e8b0fbdf:39227 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T07:02:35,661 INFO [RS:0;f334e8b0fbdf:39227 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T07:02:35,661 INFO [RS:0;f334e8b0fbdf:39227 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T07:02:35,661 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0. 2024-11-11T07:02:35,661 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0. after waiting 0 ms 2024-11-11T07:02:35,661 INFO [RS:0;f334e8b0fbdf:39227 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T07:02:35,661 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0. 2024-11-11T07:02:35,661 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 648e76b98bee8424b6ed037033a5d7c0 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-11T07:02:35,661 INFO [RS:0;f334e8b0fbdf:39227 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-11T07:02:35,661 DEBUG [RS:0;f334e8b0fbdf:39227 {}] regionserver.HRegionServer(1325): Online Regions={648e76b98bee8424b6ed037033a5d7c0=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0., 1588230740=hbase:meta,,1.1588230740} 2024-11-11T07:02:35,661 DEBUG [RS:0;f334e8b0fbdf:39227 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 648e76b98bee8424b6ed037033a5d7c0 2024-11-11T07:02:35,661 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T07:02:35,661 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T07:02:35,661 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T07:02:35,661 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T07:02:35,661 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T07:02:35,661 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-11T07:02:35,665 DEBUG 
[RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/.tmp/info/3959e4117ac0421da61027870abc3bf0 is 1080, key is row0001/info:/1731308555647/Put/seqid=0 2024-11-11T07:02:35,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741845_1021 (size=6033) 2024-11-11T07:02:35,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741845_1021 (size=6033) 2024-11-11T07:02:35,672 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/.tmp/info/3959e4117ac0421da61027870abc3bf0 2024-11-11T07:02:35,677 INFO [regionserver/f334e8b0fbdf:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T07:02:35,677 INFO [regionserver/f334e8b0fbdf:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T07:02:35,678 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/.tmp/info/3959e4117ac0421da61027870abc3bf0 as hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/info/3959e4117ac0421da61027870abc3bf0 2024-11-11T07:02:35,683 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/info/3959e4117ac0421da61027870abc3bf0, entries=1, sequenceid=22, filesize=5.9 K 2024-11-11T07:02:35,684 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/hbase/meta/1588230740/.tmp/info/8820228b1e2a43d99e2269178d4a5d2e is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0./info:regioninfo/1731308505617/Put/seqid=0 2024-11-11T07:02:35,684 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 648e76b98bee8424b6ed037033a5d7c0 in 23ms, sequenceid=22, compaction requested=true 2024-11-11T07:02:35,684 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0.-1 {}] regionserver.HStore(2317): Moving the files 
[hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/info/baae6b2728d94f328f2454fab656116d, hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/info/ec41434bd5074309b84570fdb76c5d34, hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/info/607bd85b6d3c4f20a87d1e53116756a3] to archive 2024-11-11T07:02:35,685 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-11T07:02:35,687 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/info/baae6b2728d94f328f2454fab656116d to hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/info/baae6b2728d94f328f2454fab656116d 2024-11-11T07:02:35,688 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/info/ec41434bd5074309b84570fdb76c5d34 to hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/info/ec41434bd5074309b84570fdb76c5d34 2024-11-11T07:02:35,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741846_1022 (size=7308) 2024-11-11T07:02:35,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741846_1022 (size=7308) 2024-11-11T07:02:35,690 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/hbase/meta/1588230740/.tmp/info/8820228b1e2a43d99e2269178d4a5d2e 2024-11-11T07:02:35,690 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/info/607bd85b6d3c4f20a87d1e53116756a3 to 
hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/info/607bd85b6d3c4f20a87d1e53116756a3 2024-11-11T07:02:35,690 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=f334e8b0fbdf:35163 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] 
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-11T07:02:35,691 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [baae6b2728d94f328f2454fab656116d=6033, ec41434bd5074309b84570fdb76c5d34=6033, 607bd85b6d3c4f20a87d1e53116756a3=6033] 2024-11-11T07:02:35,694 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/648e76b98bee8424b6ed037033a5d7c0/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-11T07:02:35,695 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0. 2024-11-11T07:02:35,695 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 648e76b98bee8424b6ed037033a5d7c0: Waiting for close lock at 1731308555661Running coprocessor pre-close hooks at 1731308555661Disabling compacts and flushes for region at 1731308555661Disabling writes for close at 1731308555661Obtaining lock to block concurrent updates at 1731308555661Preparing flush snapshotting stores in 648e76b98bee8424b6ed037033a5d7c0 at 1731308555661Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731308555661Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0. 
at 1731308555662 (+1 ms)Flushing 648e76b98bee8424b6ed037033a5d7c0/info: creating writer at 1731308555662Flushing 648e76b98bee8424b6ed037033a5d7c0/info: appending metadata at 1731308555665 (+3 ms)Flushing 648e76b98bee8424b6ed037033a5d7c0/info: closing flushed file at 1731308555665Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15c4484c: reopening flushed file at 1731308555677 (+12 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 648e76b98bee8424b6ed037033a5d7c0 in 23ms, sequenceid=22, compaction requested=true at 1731308555684 (+7 ms)Writing region close event to WAL at 1731308555691 (+7 ms)Running coprocessor post-close hooks at 1731308555695 (+4 ms)Closed at 1731308555695 2024-11-11T07:02:35,695 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731308505260.648e76b98bee8424b6ed037033a5d7c0. 2024-11-11T07:02:35,709 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/hbase/meta/1588230740/.tmp/ns/d633523f8b424f55a4cc0a18b11488ec is 43, key is default/ns:d/1731308505220/Put/seqid=0 2024-11-11T07:02:35,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741847_1023 (size=5153) 2024-11-11T07:02:35,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741847_1023 (size=5153) 2024-11-11T07:02:35,715 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/hbase/meta/1588230740/.tmp/ns/d633523f8b424f55a4cc0a18b11488ec 2024-11-11T07:02:35,734 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/hbase/meta/1588230740/.tmp/table/1d4863420088441a81d2afb0f1ffa4e2 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731308505627/Put/seqid=0 2024-11-11T07:02:35,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741848_1024 (size=5508) 2024-11-11T07:02:35,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741848_1024 (size=5508) 2024-11-11T07:02:35,740 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/hbase/meta/1588230740/.tmp/table/1d4863420088441a81d2afb0f1ffa4e2 2024-11-11T07:02:35,745 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/hbase/meta/1588230740/.tmp/info/8820228b1e2a43d99e2269178d4a5d2e as 
hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/hbase/meta/1588230740/info/8820228b1e2a43d99e2269178d4a5d2e 2024-11-11T07:02:35,750 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/hbase/meta/1588230740/info/8820228b1e2a43d99e2269178d4a5d2e, entries=10, sequenceid=11, filesize=7.1 K 2024-11-11T07:02:35,751 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/hbase/meta/1588230740/.tmp/ns/d633523f8b424f55a4cc0a18b11488ec as hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/hbase/meta/1588230740/ns/d633523f8b424f55a4cc0a18b11488ec 2024-11-11T07:02:35,756 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/hbase/meta/1588230740/ns/d633523f8b424f55a4cc0a18b11488ec, entries=2, sequenceid=11, filesize=5.0 K 2024-11-11T07:02:35,757 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/hbase/meta/1588230740/.tmp/table/1d4863420088441a81d2afb0f1ffa4e2 as hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/hbase/meta/1588230740/table/1d4863420088441a81d2afb0f1ffa4e2 2024-11-11T07:02:35,762 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/hbase/meta/1588230740/table/1d4863420088441a81d2afb0f1ffa4e2, entries=2, sequenceid=11, filesize=5.4 K 2024-11-11T07:02:35,763 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 102ms, sequenceid=11, compaction requested=false 2024-11-11T07:02:35,767 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-11T07:02:35,768 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T07:02:35,768 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T07:02:35,768 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731308555661Running coprocessor pre-close hooks at 1731308555661Disabling compacts and flushes for region at 1731308555661Disabling writes for close at 1731308555661Obtaining lock to block concurrent updates at 1731308555661Preparing flush snapshotting stores in 1588230740 at 1731308555661Finished 
memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731308555662 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731308555662Flushing 1588230740/info: creating writer at 1731308555663 (+1 ms)Flushing 1588230740/info: appending metadata at 1731308555683 (+20 ms)Flushing 1588230740/info: closing flushed file at 1731308555683Flushing 1588230740/ns: creating writer at 1731308555695 (+12 ms)Flushing 1588230740/ns: appending metadata at 1731308555709 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731308555709Flushing 1588230740/table: creating writer at 1731308555720 (+11 ms)Flushing 1588230740/table: appending metadata at 1731308555734 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731308555734Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f51be9: reopening flushed file at 1731308555745 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@76fd62d7: reopening flushed file at 1731308555751 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3bbca04a: reopening flushed file at 1731308555756 (+5 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 102ms, sequenceid=11, compaction requested=false at 1731308555763 (+7 ms)Writing region close event to WAL at 1731308555764 (+1 ms)Running coprocessor post-close hooks at 1731308555768 (+4 ms)Closed at 1731308555768 2024-11-11T07:02:35,768 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T07:02:35,861 INFO [RS:0;f334e8b0fbdf:39227 {}] regionserver.HRegionServer(976): stopping server f334e8b0fbdf,39227,1731308504431; all regions closed. 
2024-11-11T07:02:35,862 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:35,862 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:35,862 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:35,862 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:35,862 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:35,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741834_1010 (size=3306) 2024-11-11T07:02:35,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741834_1010 (size=3306) 2024-11-11T07:02:35,867 DEBUG [RS:0;f334e8b0fbdf:39227 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/oldWALs 2024-11-11T07:02:35,867 INFO [RS:0;f334e8b0fbdf:39227 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f334e8b0fbdf%2C39227%2C1731308504431.meta:.meta(num 1731308505180) 2024-11-11T07:02:35,867 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:35,867 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:35,867 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:35,867 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:35,867 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:35,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741844_1020 (size=1252) 2024-11-11T07:02:35,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741844_1020 (size=1252) 2024-11-11T07:02:35,872 DEBUG [RS:0;f334e8b0fbdf:39227 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/oldWALs 2024-11-11T07:02:35,872 INFO [RS:0;f334e8b0fbdf:39227 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f334e8b0fbdf%2C39227%2C1731308504431:(num 1731308555648) 2024-11-11T07:02:35,872 DEBUG [RS:0;f334e8b0fbdf:39227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:02:35,872 INFO [RS:0;f334e8b0fbdf:39227 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T07:02:35,872 INFO [RS:0;f334e8b0fbdf:39227 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T07:02:35,872 INFO [RS:0;f334e8b0fbdf:39227 {}] hbase.ChoreService(370): Chore service for: regionserver/f334e8b0fbdf:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-11T07:02:35,873 INFO [RS:0;f334e8b0fbdf:39227 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T07:02:35,873 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T07:02:35,873 INFO [RS:0;f334e8b0fbdf:39227 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39227 2024-11-11T07:02:35,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39227-0x10105d52fc20001, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f334e8b0fbdf,39227,1731308504431 2024-11-11T07:02:35,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T07:02:35,875 INFO [RS:0;f334e8b0fbdf:39227 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T07:02:35,877 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f334e8b0fbdf,39227,1731308504431] 2024-11-11T07:02:35,880 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f334e8b0fbdf,39227,1731308504431 already deleted, retry=false 2024-11-11T07:02:35,880 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f334e8b0fbdf,39227,1731308504431 expired; onlineServers=0 2024-11-11T07:02:35,880 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'f334e8b0fbdf,35163,1731308504384' ***** 2024-11-11T07:02:35,880 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T07:02:35,880 INFO [M:0;f334e8b0fbdf:35163 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T07:02:35,880 INFO [M:0;f334e8b0fbdf:35163 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T07:02:35,880 DEBUG [M:0;f334e8b0fbdf:35163 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T07:02:35,880 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-11T07:02:35,881 DEBUG [M:0;f334e8b0fbdf:35163 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-11T07:02:35,881 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.large.0-1731308504583 {}] cleaner.HFileCleaner(306): Exit Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.large.0-1731308504583,5,FailOnTimeoutGroup]
2024-11-11T07:02:35,881 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.small.0-1731308504583 {}] cleaner.HFileCleaner(306): Exit Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.small.0-1731308504583,5,FailOnTimeoutGroup]
2024-11-11T07:02:35,881 INFO [M:0;f334e8b0fbdf:35163 {}] hbase.ChoreService(370): Chore service for: master/f334e8b0fbdf:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-11-11T07:02:35,881 INFO [M:0;f334e8b0fbdf:35163 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-11T07:02:35,881 DEBUG [M:0;f334e8b0fbdf:35163 {}] master.HMaster(1795): Stopping service threads
2024-11-11T07:02:35,881 INFO [M:0;f334e8b0fbdf:35163 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-11T07:02:35,881 INFO [M:0;f334e8b0fbdf:35163 {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-11T07:02:35,881 INFO [M:0;f334e8b0fbdf:35163 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-11T07:02:35,881 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-11T07:02:35,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-11T07:02:35,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-11T07:02:35,882 DEBUG [M:0;f334e8b0fbdf:35163 {}] zookeeper.ZKUtil(347): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-11T07:02:35,882 WARN [M:0;f334e8b0fbdf:35163 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-11T07:02:35,883 INFO [M:0;f334e8b0fbdf:35163 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/.lastflushedseqids
2024-11-11T07:02:35,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741849_1025 (size=130)
2024-11-11T07:02:35,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741849_1025 (size=130)
2024-11-11T07:02:35,888 INFO [M:0;f334e8b0fbdf:35163 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-11-11T07:02:35,888 INFO [M:0;f334e8b0fbdf:35163 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-11T07:02:35,888 DEBUG [M:0;f334e8b0fbdf:35163 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-11T07:02:35,888 INFO [M:0;f334e8b0fbdf:35163 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-11T07:02:35,888 DEBUG [M:0;f334e8b0fbdf:35163 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-11T07:02:35,888 DEBUG [M:0;f334e8b0fbdf:35163 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-11T07:02:35,888 DEBUG [M:0;f334e8b0fbdf:35163 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-11T07:02:35,888 INFO [M:0;f334e8b0fbdf:35163 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.60 KB heapSize=55.01 KB
2024-11-11T07:02:35,904 DEBUG [M:0;f334e8b0fbdf:35163 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6cf4bac452c94d169824a385e1182733 is 82, key is hbase:meta,,1/info:regioninfo/1731308505204/Put/seqid=0
2024-11-11T07:02:35,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741850_1026 (size=5672)
2024-11-11T07:02:35,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741850_1026 (size=5672)
2024-11-11T07:02:35,909 INFO [M:0;f334e8b0fbdf:35163 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6cf4bac452c94d169824a385e1182733
2024-11-11T07:02:35,928 DEBUG [M:0;f334e8b0fbdf:35163 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ee0d7be5efdb40a5b8a9138c8317faad is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731308505632/Put/seqid=0
2024-11-11T07:02:35,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741851_1027 (size=7824)
2024-11-11T07:02:35,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741851_1027 (size=7824)
2024-11-11T07:02:35,932 INFO [M:0;f334e8b0fbdf:35163 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.00 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ee0d7be5efdb40a5b8a9138c8317faad
2024-11-11T07:02:35,936 INFO [M:0;f334e8b0fbdf:35163 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ee0d7be5efdb40a5b8a9138c8317faad
2024-11-11T07:02:35,950 DEBUG [M:0;f334e8b0fbdf:35163 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/43c9023ce260490e8a8454547a774259 is 69, key is f334e8b0fbdf,39227,1731308504431/rs:state/1731308504665/Put/seqid=0
2024-11-11T07:02:35,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741852_1028 (size=5156)
2024-11-11T07:02:35,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741852_1028 (size=5156)
2024-11-11T07:02:35,955 INFO [M:0;f334e8b0fbdf:35163 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/43c9023ce260490e8a8454547a774259
2024-11-11T07:02:35,973 DEBUG [M:0;f334e8b0fbdf:35163 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2bfe71f1d72f4de4a3164022f904dfbe is 52, key is load_balancer_on/state:d/1731308505256/Put/seqid=0
2024-11-11T07:02:35,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741853_1029 (size=5056)
2024-11-11T07:02:35,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741853_1029 (size=5056)
2024-11-11T07:02:35,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39227-0x10105d52fc20001, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T07:02:35,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39227-0x10105d52fc20001, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T07:02:35,977 INFO [RS:0;f334e8b0fbdf:39227 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-11T07:02:35,977 INFO [RS:0;f334e8b0fbdf:39227 {}] regionserver.HRegionServer(1031): Exiting; stopping=f334e8b0fbdf,39227,1731308504431; zookeeper connection closed.
2024-11-11T07:02:35,977 INFO [M:0;f334e8b0fbdf:35163 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2bfe71f1d72f4de4a3164022f904dfbe 2024-11-11T07:02:35,977 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1dace46d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1dace46d 2024-11-11T07:02:35,978 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-11T07:02:35,982 DEBUG [M:0;f334e8b0fbdf:35163 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6cf4bac452c94d169824a385e1182733 as hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6cf4bac452c94d169824a385e1182733 2024-11-11T07:02:35,987 INFO [M:0;f334e8b0fbdf:35163 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6cf4bac452c94d169824a385e1182733, entries=8, sequenceid=121, filesize=5.5 K 2024-11-11T07:02:35,988 DEBUG [M:0;f334e8b0fbdf:35163 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ee0d7be5efdb40a5b8a9138c8317faad as hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ee0d7be5efdb40a5b8a9138c8317faad 2024-11-11T07:02:35,993 INFO [M:0;f334e8b0fbdf:35163 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ee0d7be5efdb40a5b8a9138c8317faad 2024-11-11T07:02:35,993 INFO [M:0;f334e8b0fbdf:35163 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ee0d7be5efdb40a5b8a9138c8317faad, entries=14, sequenceid=121, filesize=7.6 K 2024-11-11T07:02:35,994 DEBUG [M:0;f334e8b0fbdf:35163 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/43c9023ce260490e8a8454547a774259 as hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/43c9023ce260490e8a8454547a774259 2024-11-11T07:02:35,998 INFO [M:0;f334e8b0fbdf:35163 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/43c9023ce260490e8a8454547a774259, entries=1, sequenceid=121, filesize=5.0 K 2024-11-11T07:02:35,999 DEBUG [M:0;f334e8b0fbdf:35163 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2bfe71f1d72f4de4a3164022f904dfbe as hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2bfe71f1d72f4de4a3164022f904dfbe 2024-11-11T07:02:36,003 INFO [M:0;f334e8b0fbdf:35163 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33781/user/jenkins/test-data/2b66fdcf-b9fd-1889-df91-c3ea48cfab17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2bfe71f1d72f4de4a3164022f904dfbe, entries=1, sequenceid=121, filesize=4.9 K 2024-11-11T07:02:36,004 INFO [M:0;f334e8b0fbdf:35163 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.60 KB/44650, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 116ms, sequenceid=121, compaction requested=false 2024-11-11T07:02:36,006 INFO [M:0;f334e8b0fbdf:35163 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:02:36,006 DEBUG [M:0;f334e8b0fbdf:35163 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731308555888Disabling compacts and flushes for region at 1731308555888Disabling writes for close at 1731308555888Obtaining lock to block concurrent updates at 1731308555888Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731308555888Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44650, getHeapSize=56264, getOffHeapSize=0, getCellsCount=140 at 1731308555889 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731308555889Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731308555889Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731308555903 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731308555903Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731308555913 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731308555927 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731308555927Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731308555936 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731308555950 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731308555950Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731308555959 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731308555972 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731308555972Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1ca856a3: reopening flushed file at 1731308555982 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a50a1f5: reopening flushed file at 1731308555987 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3050f90: reopening flushed file at 1731308555993 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2fb2ff09: reopening flushed file at 1731308555998 (+5 ms)Finished flush of dataSize ~43.60 KB/44650, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 116ms, sequenceid=121, compaction requested=false at 1731308556004 (+6 ms)Writing region close event to WAL at 1731308556005 (+1 ms)Closed at 1731308556006 (+1 ms) 2024-11-11T07:02:36,006 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:36,006 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:36,006 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:36,006 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:36,006 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:02:36,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40977 is added to blk_1073741830_1006 (size=53047) 2024-11-11T07:02:36,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43147 is added to blk_1073741830_1006 (size=53047) 2024-11-11T07:02:36,009 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T07:02:36,009 INFO [M:0;f334e8b0fbdf:35163 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-11T07:02:36,009 INFO [M:0;f334e8b0fbdf:35163 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35163 2024-11-11T07:02:36,009 INFO [M:0;f334e8b0fbdf:35163 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T07:02:36,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:02:36,111 INFO [M:0;f334e8b0fbdf:35163 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T07:02:36,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35163-0x10105d52fc20000, quorum=127.0.0.1:64017, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:02:36,114 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ecaedd6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:02:36,114 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@222377c1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:02:36,114 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:02:36,114 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e470e04{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:02:36,114 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2aaa4790{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/hadoop.log.dir/,STOPPED} 2024-11-11T07:02:36,116 WARN [BP-119408927-172.17.0.2-1731308503549 heartbeating to localhost/127.0.0.1:33781 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T07:02:36,116 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T07:02:36,116 WARN [BP-119408927-172.17.0.2-1731308503549 heartbeating to localhost/127.0.0.1:33781 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-119408927-172.17.0.2-1731308503549 (Datanode Uuid d0054b2c-3178-4547-b3e9-1f04bc04847e) service to localhost/127.0.0.1:33781 2024-11-11T07:02:36,116 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T07:02:36,116 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/cluster_cd19cd6a-f412-51cb-7248-d21da25cae5e/data/data3/current/BP-119408927-172.17.0.2-1731308503549 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:02:36,117 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/cluster_cd19cd6a-f412-51cb-7248-d21da25cae5e/data/data4/current/BP-119408927-172.17.0.2-1731308503549 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:02:36,117 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T07:02:36,118 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5a3c93e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:02:36,119 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@30c9629f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:02:36,119 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:02:36,119 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@214c2124{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:02:36,119 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@113ad868{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/hadoop.log.dir/,STOPPED} 2024-11-11T07:02:36,120 WARN [BP-119408927-172.17.0.2-1731308503549 heartbeating to localhost/127.0.0.1:33781 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T07:02:36,120 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T07:02:36,120 WARN [BP-119408927-172.17.0.2-1731308503549 heartbeating to localhost/127.0.0.1:33781 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-119408927-172.17.0.2-1731308503549 (Datanode Uuid faec0a49-413e-42a7-85b3-9f9fc5718e38) service to localhost/127.0.0.1:33781 2024-11-11T07:02:36,120 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T07:02:36,121 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/cluster_cd19cd6a-f412-51cb-7248-d21da25cae5e/data/data1/current/BP-119408927-172.17.0.2-1731308503549 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:02:36,121 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/cluster_cd19cd6a-f412-51cb-7248-d21da25cae5e/data/data2/current/BP-119408927-172.17.0.2-1731308503549 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:02:36,121 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T07:02:36,129 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3ece2bc8{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T07:02:36,130 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@642a12de{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:02:36,130 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:02:36,130 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a7d1da5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:02:36,130 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a75d232{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/hadoop.log.dir/,STOPPED} 2024-11-11T07:02:36,136 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T07:02:36,153 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T07:02:36,161 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=207 (was 180) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33781 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/f334e8b0fbdf:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33781 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33781 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33781 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33781 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33781 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33781 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:33781 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33781 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=485 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=40 (was 38) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=8283 (was 8590) 2024-11-11T07:02:36,169 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=207, OpenFileDescriptor=485, MaxFileDescriptor=1048576, SystemLoadAverage=40, ProcessCount=11, AvailableMemoryMB=8284 2024-11-11T07:02:36,169 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T07:02:36,169 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/hadoop.log.dir so I do NOT create it in target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5 2024-11-11T07:02:36,169 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/908451c1-ccc9-20c9-308f-51f7ce514207/hadoop.tmp.dir so I do NOT create it in target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5 2024-11-11T07:02:36,170 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/cluster_869babdb-b34a-b481-77d5-e4b91c1cf8f4, deleteOnExit=true 2024-11-11T07:02:36,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T07:02:36,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/test.cache.data in system properties and HBase conf 2024-11-11T07:02:36,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T07:02:36,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/hadoop.log.dir in system properties and HBase conf 2024-11-11T07:02:36,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/mapreduce.cluster.local.dir in system properties and HBase conf 
2024-11-11T07:02:36,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T07:02:36,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T07:02:36,170 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-11T07:02:36,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T07:02:36,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T07:02:36,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T07:02:36,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T07:02:36,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T07:02:36,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T07:02:36,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T07:02:36,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T07:02:36,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T07:02:36,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/nfs.dump.dir in system properties and HBase conf 2024-11-11T07:02:36,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/java.io.tmpdir in system properties and HBase conf 2024-11-11T07:02:36,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T07:02:36,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T07:02:36,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T07:02:36,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-11T07:02:36,184 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-11T07:02:36,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-11T07:02:36,251 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-11T07:02:36,257 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-11T07:02:36,258 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-11T07:02:36,258 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-11T07:02:36,258 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-11T07:02:36,258 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-11T07:02:36,259 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@53fc2fb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/hadoop.log.dir/,AVAILABLE}
2024-11-11T07:02:36,259 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d320e1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-11T07:02:36,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-11T07:02:36,373 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@47a053ae{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/java.io.tmpdir/jetty-localhost-42889-hadoop-hdfs-3_4_1-tests_jar-_-any-5421319959551592651/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-11T07:02:36,374 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@446d84f{HTTP/1.1, (http/1.1)}{localhost:42889}
2024-11-11T07:02:36,374 INFO [Time-limited test {}] server.Server(415): Started @239953ms
2024-11-11T07:02:36,387 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-11T07:02:36,452 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-11T07:02:36,455 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-11T07:02:36,455 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-11T07:02:36,455 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-11T07:02:36,455 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-11T07:02:36,456 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@256ffd79{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/hadoop.log.dir/,AVAILABLE}
2024-11-11T07:02:36,456 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a83b9f6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-11T07:02:36,570 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@419b7afd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/java.io.tmpdir/jetty-localhost-34669-hadoop-hdfs-3_4_1-tests_jar-_-any-1379789249853785657/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T07:02:36,570 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a8558d3{HTTP/1.1, (http/1.1)}{localhost:34669}
2024-11-11T07:02:36,570 INFO
[Time-limited test {}] server.Server(415): Started @240149ms 2024-11-11T07:02:36,571 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T07:02:36,599 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:02:36,602 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T07:02:36,603 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T07:02:36,603 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T07:02:36,603 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T07:02:36,603 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ac7d52f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/hadoop.log.dir/,AVAILABLE} 2024-11-11T07:02:36,603 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@444d0b71{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T07:02:36,673 WARN [Thread-1946 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/cluster_869babdb-b34a-b481-77d5-e4b91c1cf8f4/data/data1/current/BP-641971557-172.17.0.2-1731308556201/current, will proceed with Du for space computation calculation, 2024-11-11T07:02:36,673 WARN [Thread-1947 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/cluster_869babdb-b34a-b481-77d5-e4b91c1cf8f4/data/data2/current/BP-641971557-172.17.0.2-1731308556201/current, will proceed with Du for space computation calculation, 2024-11-11T07:02:36,678 INFO [regionserver/f334e8b0fbdf:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T07:02:36,689 WARN [Thread-1925 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T07:02:36,692 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x42d4d87a14e772cf with lease ID 0x265cc878580fb1b6: Processing first storage report for DS-1b4c5166-82fb-4187-8d64-1231c4c29947 from datanode DatanodeRegistration(127.0.0.1:43571, datanodeUuid=b63cddaf-1c61-4532-8c09-2e62c591ed6a, infoPort=41665, infoSecurePort=0, ipcPort=37989, storageInfo=lv=-57;cid=testClusterID;nsid=915028261;c=1731308556201) 2024-11-11T07:02:36,692 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x42d4d87a14e772cf with lease ID 0x265cc878580fb1b6: from storage DS-1b4c5166-82fb-4187-8d64-1231c4c29947 node DatanodeRegistration(127.0.0.1:43571, datanodeUuid=b63cddaf-1c61-4532-8c09-2e62c591ed6a, infoPort=41665, infoSecurePort=0, ipcPort=37989, storageInfo=lv=-57;cid=testClusterID;nsid=915028261;c=1731308556201), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:02:36,692 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x42d4d87a14e772cf with lease ID 0x265cc878580fb1b6: Processing first storage report for DS-fa5cebc9-1f84-4e20-9d0e-e55baa6ec463 from datanode DatanodeRegistration(127.0.0.1:43571, datanodeUuid=b63cddaf-1c61-4532-8c09-2e62c591ed6a, infoPort=41665, infoSecurePort=0, ipcPort=37989, storageInfo=lv=-57;cid=testClusterID;nsid=915028261;c=1731308556201) 2024-11-11T07:02:36,692 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x42d4d87a14e772cf with lease ID 0x265cc878580fb1b6: from storage DS-fa5cebc9-1f84-4e20-9d0e-e55baa6ec463 node DatanodeRegistration(127.0.0.1:43571, datanodeUuid=b63cddaf-1c61-4532-8c09-2e62c591ed6a, infoPort=41665, infoSecurePort=0, ipcPort=37989, storageInfo=lv=-57;cid=testClusterID;nsid=915028261;c=1731308556201), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:02:36,719 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4328dba6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/java.io.tmpdir/jetty-localhost-41521-hadoop-hdfs-3_4_1-tests_jar-_-any-295710856904595954/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:02:36,719 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@250c37c4{HTTP/1.1, (http/1.1)}{localhost:41521} 2024-11-11T07:02:36,719 INFO [Time-limited test {}] server.Server(415): Started @240298ms 2024-11-11T07:02:36,720 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-11T07:02:36,817 WARN [Thread-1973 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/cluster_869babdb-b34a-b481-77d5-e4b91c1cf8f4/data/data4/current/BP-641971557-172.17.0.2-1731308556201/current, will proceed with Du for space computation calculation, 2024-11-11T07:02:36,817 WARN [Thread-1972 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/cluster_869babdb-b34a-b481-77d5-e4b91c1cf8f4/data/data3/current/BP-641971557-172.17.0.2-1731308556201/current, will proceed with Du for space computation calculation, 2024-11-11T07:02:36,833 WARN [Thread-1961 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T07:02:36,835 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2142359e2768b449 with lease ID 0x265cc878580fb1b7: Processing first storage report for DS-9cca779f-2e63-4e08-b894-415affb0673d from datanode DatanodeRegistration(127.0.0.1:34399, datanodeUuid=dc8384a2-8ced-4f53-9483-25aa0a6dc1e5, infoPort=45283, infoSecurePort=0, ipcPort=35643, storageInfo=lv=-57;cid=testClusterID;nsid=915028261;c=1731308556201) 2024-11-11T07:02:36,835 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2142359e2768b449 with lease ID 0x265cc878580fb1b7: from storage DS-9cca779f-2e63-4e08-b894-415affb0673d node DatanodeRegistration(127.0.0.1:34399, datanodeUuid=dc8384a2-8ced-4f53-9483-25aa0a6dc1e5, infoPort=45283, infoSecurePort=0, ipcPort=35643, storageInfo=lv=-57;cid=testClusterID;nsid=915028261;c=1731308556201), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:02:36,835 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2142359e2768b449 with lease ID 0x265cc878580fb1b7: Processing first storage report for DS-73f488a0-cecb-4cc0-8d4b-26ac90890636 from datanode DatanodeRegistration(127.0.0.1:34399, datanodeUuid=dc8384a2-8ced-4f53-9483-25aa0a6dc1e5, infoPort=45283, infoSecurePort=0, ipcPort=35643, storageInfo=lv=-57;cid=testClusterID;nsid=915028261;c=1731308556201) 2024-11-11T07:02:36,835 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2142359e2768b449 with lease ID 0x265cc878580fb1b7: from storage DS-73f488a0-cecb-4cc0-8d4b-26ac90890636 node DatanodeRegistration(127.0.0.1:34399, datanodeUuid=dc8384a2-8ced-4f53-9483-25aa0a6dc1e5, infoPort=45283, infoSecurePort=0, ipcPort=35643, storageInfo=lv=-57;cid=testClusterID;nsid=915028261;c=1731308556201), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:02:36,841 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5 2024-11-11T07:02:36,844 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/cluster_869babdb-b34a-b481-77d5-e4b91c1cf8f4/zookeeper_0, clientPort=64333, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/cluster_869babdb-b34a-b481-77d5-e4b91c1cf8f4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/cluster_869babdb-b34a-b481-77d5-e4b91c1cf8f4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T07:02:36,844 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64333 2024-11-11T07:02:36,845 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:02:36,846 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:02:36,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741825_1001 (size=7) 2024-11-11T07:02:36,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741825_1001 (size=7) 2024-11-11T07:02:36,854 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925 with version=8 2024-11-11T07:02:36,854 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/hbase-staging 2024-11-11T07:02:36,856 INFO [Time-limited test {}] client.ConnectionUtils(128): master/f334e8b0fbdf:0 server-side Connection retries=45 2024-11-11T07:02:36,857 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:02:36,857 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T07:02:36,857 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T07:02:36,857 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:02:36,857 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T07:02:36,857 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T07:02:36,857 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T07:02:36,857 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39247 2024-11-11T07:02:36,858 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39247 connecting to ZooKeeper ensemble=127.0.0.1:64333 2024-11-11T07:02:36,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:392470x0, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T07:02:36,865 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39247-0x10105d5fcbb0000 connected 2024-11-11T07:02:36,883 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:02:36,885 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:02:36,887 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:02:36,887 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925, hbase.cluster.distributed=false 2024-11-11T07:02:36,888 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T07:02:36,889 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39247 2024-11-11T07:02:36,889 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39247 2024-11-11T07:02:36,889 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39247 2024-11-11T07:02:36,890 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39247 2024-11-11T07:02:36,890 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39247 2024-11-11T07:02:36,905 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f334e8b0fbdf:0 server-side Connection retries=45 2024-11-11T07:02:36,905 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:02:36,905 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T07:02:36,905 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T07:02:36,905 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:02:36,905 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T07:02:36,905 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T07:02:36,905 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T07:02:36,906 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37101 2024-11-11T07:02:36,907 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37101 connecting to ZooKeeper ensemble=127.0.0.1:64333 2024-11-11T07:02:36,908 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:02:36,909 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:02:36,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:371010x0, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T07:02:36,914 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:371010x0, quorum=127.0.0.1:64333, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:02:36,914 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37101-0x10105d5fcbb0001 connected 2024-11-11T07:02:36,914 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T07:02:36,914 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T07:02:36,915 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37101-0x10105d5fcbb0001, quorum=127.0.0.1:64333, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T07:02:36,916 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37101-0x10105d5fcbb0001, quorum=127.0.0.1:64333, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T07:02:36,916 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37101 2024-11-11T07:02:36,916 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37101 2024-11-11T07:02:36,916 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37101 2024-11-11T07:02:36,917 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37101 2024-11-11T07:02:36,917 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37101 2024-11-11T07:02:36,928 
DEBUG [M:0;f334e8b0fbdf:39247 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;f334e8b0fbdf:39247 2024-11-11T07:02:36,929 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/f334e8b0fbdf,39247,1731308556856 2024-11-11T07:02:36,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T07:02:36,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37101-0x10105d5fcbb0001, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T07:02:36,931 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/f334e8b0fbdf,39247,1731308556856 2024-11-11T07:02:36,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:02:36,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37101-0x10105d5fcbb0001, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T07:02:36,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37101-0x10105d5fcbb0001, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:02:36,933 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T07:02:36,933 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/f334e8b0fbdf,39247,1731308556856 from backup master directory 2024-11-11T07:02:36,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/f334e8b0fbdf,39247,1731308556856 2024-11-11T07:02:36,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37101-0x10105d5fcbb0001, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T07:02:36,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T07:02:36,937 WARN [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-11T07:02:36,937 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=f334e8b0fbdf,39247,1731308556856 2024-11-11T07:02:36,941 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/hbase.id] with ID: 17852e31-c6c3-42df-ad32-0d0528119a50 2024-11-11T07:02:36,941 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/.tmp/hbase.id 2024-11-11T07:02:36,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741826_1002 (size=42) 2024-11-11T07:02:36,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741826_1002 (size=42) 2024-11-11T07:02:36,947 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/.tmp/hbase.id]:[hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/hbase.id] 2024-11-11T07:02:36,957 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:02:36,957 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T07:02:36,958 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-11T07:02:36,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37101-0x10105d5fcbb0001, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:02:36,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:02:36,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741827_1003 (size=196) 2024-11-11T07:02:36,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741827_1003 (size=196) 2024-11-11T07:02:36,966 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T07:02:36,967 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T07:02:36,967 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T07:02:36,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741828_1004 (size=1189) 2024-11-11T07:02:36,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741828_1004 (size=1189) 2024-11-11T07:02:36,974 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store 2024-11-11T07:02:36,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741829_1005 (size=34) 2024-11-11T07:02:36,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741829_1005 (size=34) 2024-11-11T07:02:36,981 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:02:36,981 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T07:02:36,981 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:02:36,981 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:02:36,981 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T07:02:36,981 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:02:36,981 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-11T07:02:36,981 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731308556981Disabling compacts and flushes for region at 1731308556981Disabling writes for close at 1731308556981Writing region close event to WAL at 1731308556981Closed at 1731308556981 2024-11-11T07:02:36,982 WARN [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store/.initializing 2024-11-11T07:02:36,982 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/WALs/f334e8b0fbdf,39247,1731308556856 2024-11-11T07:02:36,984 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f334e8b0fbdf%2C39247%2C1731308556856, suffix=, logDir=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/WALs/f334e8b0fbdf,39247,1731308556856, archiveDir=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/oldWALs, maxLogs=10 2024-11-11T07:02:36,984 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C39247%2C1731308556856.1731308556984 2024-11-11T07:02:36,989 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/WALs/f334e8b0fbdf,39247,1731308556856/f334e8b0fbdf%2C39247%2C1731308556856.1731308556984 2024-11-11T07:02:36,991 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45283:45283),(127.0.0.1/127.0.0.1:41665:41665)] 2024-11-11T07:02:36,995 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T07:02:36,995 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:02:36,995 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:02:36,995 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:02:36,996 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:02:36,997 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T07:02:36,998 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:02:36,998 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:02:36,998 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:02:36,999 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T07:02:36,999 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:02:36,999 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T07:02:36,999 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:02:37,000 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T07:02:37,000 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:02:37,001 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T07:02:37,001 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:02:37,002 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T07:02:37,002 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:02:37,002 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T07:02:37,002 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:02:37,003 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:02:37,003 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:02:37,004 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:02:37,004 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:02:37,004 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T07:02:37,005 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:02:37,007 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T07:02:37,007 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=835898, jitterRate=0.06290023028850555}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T07:02:37,008 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731308556995Initializing all the Stores at 1731308556996 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308556996Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308556996Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308556996Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308556996Cleaning up temporary data from old regions at 1731308557004 (+8 ms)Region opened successfully at 1731308557008 (+4 ms) 2024-11-11T07:02:37,009 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T07:02:37,013 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6fcd5bd7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f334e8b0fbdf/172.17.0.2:0 2024-11-11T07:02:37,014 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T07:02:37,014 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T07:02:37,014 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T07:02:37,014 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T07:02:37,014 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T07:02:37,015 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T07:02:37,015 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T07:02:37,017 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T07:02:37,017 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T07:02:37,018 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T07:02:37,019 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T07:02:37,019 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T07:02:37,020 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T07:02:37,020 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T07:02:37,021 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T07:02:37,024 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T07:02:37,025 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T07:02:37,026 DEBUG 
[master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T07:02:37,027 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T07:02:37,028 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T07:02:37,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37101-0x10105d5fcbb0001, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T07:02:37,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T07:02:37,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:02:37,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37101-0x10105d5fcbb0001, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:02:37,030 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=f334e8b0fbdf,39247,1731308556856, sessionid=0x10105d5fcbb0000, setting cluster-up flag (Was=false) 2024-11-11T07:02:37,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37101-0x10105d5fcbb0001, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:02:37,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:02:37,039 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T07:02:37,040 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f334e8b0fbdf,39247,1731308556856 2024-11-11T07:02:37,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37101-0x10105d5fcbb0001, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:02:37,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:02:37,048 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T07:02:37,049 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f334e8b0fbdf,39247,1731308556856 2024-11-11T07:02:37,050 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T07:02:37,052 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T07:02:37,052 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T07:02:37,052 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T07:02:37,053 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: f334e8b0fbdf,39247,1731308556856 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T07:02:37,054 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T07:02:37,054 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T07:02:37,054 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T07:02:37,054 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T07:02:37,054 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/f334e8b0fbdf:0, corePoolSize=10, maxPoolSize=10 2024-11-11T07:02:37,054 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:02:37,054 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=2, maxPoolSize=2 2024-11-11T07:02:37,054 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T07:02:37,056 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731308587056 2024-11-11T07:02:37,056 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T07:02:37,056 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T07:02:37,056 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T07:02:37,056 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T07:02:37,056 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T07:02:37,056 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T07:02:37,057 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T07:02:37,057 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T07:02:37,057 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T07:02:37,057 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T07:02:37,057 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T07:02:37,057 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T07:02:37,057 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T07:02:37,057 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T07:02:37,057 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.large.0-1731308557057,5,FailOnTimeoutGroup] 2024-11-11T07:02:37,058 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.small.0-1731308557057,5,FailOnTimeoutGroup] 2024-11-11T07:02:37,058 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T07:02:37,058 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T07:02:37,058 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T07:02:37,058 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-11T07:02:37,058 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:02:37,058 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T07:02:37,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741831_1007 (size=1321) 2024-11-11T07:02:37,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741831_1007 (size=1321) 2024-11-11T07:02:37,065 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T07:02:37,065 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925 2024-11-11T07:02:37,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741832_1008 (size=32) 2024-11-11T07:02:37,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741832_1008 (size=32) 2024-11-11T07:02:37,072 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:02:37,073 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T07:02:37,074 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T07:02:37,074 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:02:37,074 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:02:37,074 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T07:02:37,075 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T07:02:37,075 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:02:37,075 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:02:37,075 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T07:02:37,076 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T07:02:37,076 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:02:37,077 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:02:37,077 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T07:02:37,077 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T07:02:37,077 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:02:37,078 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:02:37,078 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T07:02:37,078 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740 2024-11-11T07:02:37,079 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740 2024-11-11T07:02:37,079 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T07:02:37,079 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T07:02:37,080 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
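
The FlushLargeStoresPolicy and HMaster entries above each name a property that is left unset in this run: hbase.hregion.percolumnfamilyflush.size.lower.bound (so the policy falls back to memStoreFlushSize divided by the number of families) and hbase.regions.recovery.store.file.ref.count (so reopening regions with a very high storeFileRefCount stays disabled). As a hedged illustration only, and not something this test does, both properties could be set on the site-level Configuration before starting a cluster; the property names are taken verbatim from the log, the values below are arbitrary examples, and the flush lower bound can alternatively be set per table descriptor.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ExampleFlushAndRecoveryConf {
    public static void main(String[] args) {
        // Start from the standard HBase configuration (hbase-site.xml on the classpath).
        Configuration conf = HBaseConfiguration.create();

        // Lower bound for per-column-family flushes; the log above reports this
        // property as unset, so HBase uses memStoreFlushSize / number of families.
        // 16 MB is only an example value.
        conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);

        // The HMaster message above says reopening regions with very high
        // storeFileRefCount is disabled unless this threshold is > 0.
        // 3 is an arbitrary example threshold.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);

        System.out.println(conf.get("hbase.regions.recovery.store.file.ref.count"));
    }
}
```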
2024-11-11T07:02:37,081 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T07:02:37,082 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T07:02:37,083 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=851815, jitterRate=0.08313947916030884}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T07:02:37,083 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731308557072Initializing all the Stores at 1731308557072Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308557072Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308557072Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308557073 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308557073Cleaning up temporary data from old regions at 1731308557080 (+7 ms)Region opened successfully at 1731308557083 (+3 ms) 2024-11-11T07:02:37,083 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T07:02:37,083 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T07:02:37,083 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T07:02:37,083 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T07:02:37,083 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T07:02:37,084 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T07:02:37,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731308557083Disabling compacts and flushes for region at 1731308557083Disabling writes for close at 1731308557083Writing region close 
event to WAL at 1731308557084 (+1 ms)Closed at 1731308557084 2024-11-11T07:02:37,085 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T07:02:37,085 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T07:02:37,085 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T07:02:37,086 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T07:02:37,087 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T07:02:37,118 INFO [RS:0;f334e8b0fbdf:37101 {}] regionserver.HRegionServer(746): ClusterId : 17852e31-c6c3-42df-ad32-0d0528119a50 2024-11-11T07:02:37,119 DEBUG [RS:0;f334e8b0fbdf:37101 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T07:02:37,120 DEBUG [RS:0;f334e8b0fbdf:37101 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T07:02:37,121 DEBUG [RS:0;f334e8b0fbdf:37101 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T07:02:37,122 DEBUG [RS:0;f334e8b0fbdf:37101 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T07:02:37,123 DEBUG [RS:0;f334e8b0fbdf:37101 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f478eed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f334e8b0fbdf/172.17.0.2:0 2024-11-11T07:02:37,134 DEBUG [RS:0;f334e8b0fbdf:37101 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;f334e8b0fbdf:37101 2024-11-11T07:02:37,135 INFO [RS:0;f334e8b0fbdf:37101 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T07:02:37,135 INFO [RS:0;f334e8b0fbdf:37101 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T07:02:37,135 DEBUG [RS:0;f334e8b0fbdf:37101 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-11T07:02:37,135 INFO [RS:0;f334e8b0fbdf:37101 {}] regionserver.HRegionServer(2659): reportForDuty to master=f334e8b0fbdf,39247,1731308556856 with port=37101, startcode=1731308556905 2024-11-11T07:02:37,135 DEBUG [RS:0;f334e8b0fbdf:37101 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T07:02:37,138 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47403, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T07:02:37,138 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39247 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f334e8b0fbdf,37101,1731308556905 2024-11-11T07:02:37,138 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39247 {}] master.ServerManager(517): Registering regionserver=f334e8b0fbdf,37101,1731308556905 2024-11-11T07:02:37,140 DEBUG [RS:0;f334e8b0fbdf:37101 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925 2024-11-11T07:02:37,140 DEBUG [RS:0;f334e8b0fbdf:37101 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36089 2024-11-11T07:02:37,140 DEBUG [RS:0;f334e8b0fbdf:37101 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T07:02:37,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T07:02:37,144 DEBUG [RS:0;f334e8b0fbdf:37101 {}] zookeeper.ZKUtil(111): regionserver:37101-0x10105d5fcbb0001, quorum=127.0.0.1:64333, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f334e8b0fbdf,37101,1731308556905 2024-11-11T07:02:37,144 WARN [RS:0;f334e8b0fbdf:37101 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T07:02:37,144 INFO [RS:0;f334e8b0fbdf:37101 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T07:02:37,144 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f334e8b0fbdf,37101,1731308556905] 2024-11-11T07:02:37,144 DEBUG [RS:0;f334e8b0fbdf:37101 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/WALs/f334e8b0fbdf,37101,1731308556905 2024-11-11T07:02:37,147 INFO [RS:0;f334e8b0fbdf:37101 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T07:02:37,149 INFO [RS:0;f334e8b0fbdf:37101 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T07:02:37,149 INFO [RS:0;f334e8b0fbdf:37101 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T07:02:37,149 INFO [RS:0;f334e8b0fbdf:37101 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-11T07:02:37,149 INFO [RS:0;f334e8b0fbdf:37101 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T07:02:37,150 INFO [RS:0;f334e8b0fbdf:37101 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T07:02:37,150 INFO [RS:0;f334e8b0fbdf:37101 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T07:02:37,150 DEBUG [RS:0;f334e8b0fbdf:37101 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:02:37,150 DEBUG [RS:0;f334e8b0fbdf:37101 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:02:37,150 DEBUG [RS:0;f334e8b0fbdf:37101 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:02:37,150 DEBUG [RS:0;f334e8b0fbdf:37101 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:02:37,150 DEBUG [RS:0;f334e8b0fbdf:37101 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:02:37,150 DEBUG [RS:0;f334e8b0fbdf:37101 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f334e8b0fbdf:0, corePoolSize=2, maxPoolSize=2 2024-11-11T07:02:37,150 DEBUG [RS:0;f334e8b0fbdf:37101 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:02:37,150 DEBUG [RS:0;f334e8b0fbdf:37101 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:02:37,150 DEBUG [RS:0;f334e8b0fbdf:37101 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:02:37,150 DEBUG [RS:0;f334e8b0fbdf:37101 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:02:37,150 DEBUG [RS:0;f334e8b0fbdf:37101 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:02:37,150 DEBUG [RS:0;f334e8b0fbdf:37101 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:02:37,150 DEBUG [RS:0;f334e8b0fbdf:37101 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f334e8b0fbdf:0, corePoolSize=3, maxPoolSize=3 2024-11-11T07:02:37,150 DEBUG [RS:0;f334e8b0fbdf:37101 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0, corePoolSize=3, maxPoolSize=3 2024-11-11T07:02:37,151 INFO [RS:0;f334e8b0fbdf:37101 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-11T07:02:37,151 INFO [RS:0;f334e8b0fbdf:37101 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T07:02:37,151 INFO [RS:0;f334e8b0fbdf:37101 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T07:02:37,151 INFO [RS:0;f334e8b0fbdf:37101 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T07:02:37,151 INFO [RS:0;f334e8b0fbdf:37101 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T07:02:37,151 INFO [RS:0;f334e8b0fbdf:37101 {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,37101,1731308556905-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T07:02:37,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:37,186 INFO [RS:0;f334e8b0fbdf:37101 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T07:02:37,186 INFO [RS:0;f334e8b0fbdf:37101 {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,37101,1731308556905-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
2024-11-11T07:02:37,186 INFO [RS:0;f334e8b0fbdf:37101 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:02:37,186 INFO [RS:0;f334e8b0fbdf:37101 {}] regionserver.Replication(171): f334e8b0fbdf,37101,1731308556905 started 2024-11-11T07:02:37,201 INFO [RS:0;f334e8b0fbdf:37101 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:02:37,201 INFO [RS:0;f334e8b0fbdf:37101 {}] regionserver.HRegionServer(1482): Serving as f334e8b0fbdf,37101,1731308556905, RpcServer on f334e8b0fbdf/172.17.0.2:37101, sessionid=0x10105d5fcbb0001 2024-11-11T07:02:37,201 DEBUG [RS:0;f334e8b0fbdf:37101 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T07:02:37,201 DEBUG [RS:0;f334e8b0fbdf:37101 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f334e8b0fbdf,37101,1731308556905 2024-11-11T07:02:37,201 DEBUG [RS:0;f334e8b0fbdf:37101 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f334e8b0fbdf,37101,1731308556905' 2024-11-11T07:02:37,201 DEBUG [RS:0;f334e8b0fbdf:37101 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T07:02:37,202 DEBUG [RS:0;f334e8b0fbdf:37101 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T07:02:37,202 DEBUG [RS:0;f334e8b0fbdf:37101 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T07:02:37,202 DEBUG [RS:0;f334e8b0fbdf:37101 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T07:02:37,202 DEBUG [RS:0;f334e8b0fbdf:37101 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager f334e8b0fbdf,37101,1731308556905 2024-11-11T07:02:37,202 DEBUG [RS:0;f334e8b0fbdf:37101 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f334e8b0fbdf,37101,1731308556905' 2024-11-11T07:02:37,202 DEBUG [RS:0;f334e8b0fbdf:37101 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T07:02:37,202 DEBUG [RS:0;f334e8b0fbdf:37101 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T07:02:37,203 DEBUG [RS:0;f334e8b0fbdf:37101 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T07:02:37,203 INFO [RS:0;f334e8b0fbdf:37101 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T07:02:37,203 INFO [RS:0;f334e8b0fbdf:37101 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T07:02:37,237 WARN [f334e8b0fbdf:39247 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-11T07:02:37,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:37,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:37,304 INFO [RS:0;f334e8b0fbdf:37101 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f334e8b0fbdf%2C37101%2C1731308556905, suffix=, logDir=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/WALs/f334e8b0fbdf,37101,1731308556905, archiveDir=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/oldWALs, maxLogs=32 2024-11-11T07:02:37,305 INFO [RS:0;f334e8b0fbdf:37101 {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C37101%2C1731308556905.1731308557305 2024-11-11T07:02:37,310 INFO [RS:0;f334e8b0fbdf:37101 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/WALs/f334e8b0fbdf,37101,1731308556905/f334e8b0fbdf%2C37101%2C1731308556905.1731308557305 2024-11-11T07:02:37,311 DEBUG [RS:0;f334e8b0fbdf:37101 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45283:45283),(127.0.0.1/127.0.0.1:41665:41665)] 2024-11-11T07:02:37,487 DEBUG [f334e8b0fbdf:39247 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T07:02:37,488 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=f334e8b0fbdf,37101,1731308556905 2024-11-11T07:02:37,489 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f334e8b0fbdf,37101,1731308556905, state=OPENING 2024-11-11T07:02:37,491 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T07:02:37,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37101-0x10105d5fcbb0001, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:02:37,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:02:37,492 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T07:02:37,492 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=f334e8b0fbdf,37101,1731308556905}] 2024-11-11T07:02:37,492 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T07:02:37,492 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 
2024-11-11T07:02:37,645 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T07:02:37,647 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50301, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T07:02:37,650 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T07:02:37,650 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T07:02:37,652 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f334e8b0fbdf%2C37101%2C1731308556905.meta, suffix=.meta, logDir=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/WALs/f334e8b0fbdf,37101,1731308556905, archiveDir=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/oldWALs, maxLogs=32 2024-11-11T07:02:37,652 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C37101%2C1731308556905.meta.1731308557652.meta 2024-11-11T07:02:37,656 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/WALs/f334e8b0fbdf,37101,1731308556905/f334e8b0fbdf%2C37101%2C1731308556905.meta.1731308557652.meta 2024-11-11T07:02:37,657 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41665:41665),(127.0.0.1/127.0.0.1:45283:45283)] 2024-11-11T07:02:37,658 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T07:02:37,658 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T07:02:37,658 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T07:02:37,658 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
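
For context on the coprocessor$1 table attribute and the "Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint" entry above: for hbase:meta this attachment is done internally by InitMetaProcedure, but the same effect on an ordinary table would go through the TableDescriptorBuilder client API. The sketch below is only illustrative; the table name is made up, and calling setCoprocessor(String) registers the class at the default user priority rather than the system priority (536870911) shown in the log.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorDescriptorSketch {
    public static void main(String[] args) throws Exception {
        // "example_table" is a made-up name; the log above concerns hbase:meta.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // Same coprocessor class the log reports loading for hbase:meta.
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();

        // Shows the coprocessor registered on the descriptor.
        System.out.println(td.getCoprocessorDescriptors());
    }
}
```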
2024-11-11T07:02:37,658 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T07:02:37,658 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:02:37,659 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T07:02:37,659 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T07:02:37,660 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T07:02:37,660 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T07:02:37,661 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:02:37,661 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:02:37,661 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T07:02:37,662 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T07:02:37,662 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:02:37,662 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:02:37,662 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T07:02:37,662 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T07:02:37,663 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:02:37,663 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:02:37,663 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T07:02:37,663 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T07:02:37,663 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:02:37,664 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-11T07:02:37,664 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T07:02:37,664 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740 2024-11-11T07:02:37,665 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740 2024-11-11T07:02:37,666 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T07:02:37,666 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T07:02:37,667 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T07:02:37,668 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T07:02:37,669 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=835850, jitterRate=0.0628388524055481}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T07:02:37,669 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T07:02:37,669 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731308557659Writing region info on filesystem at 1731308557659Initializing all the Stores at 1731308557659Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308557659Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308557660 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308557660Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308557660Cleaning up temporary data from old regions at 1731308557666 (+6 ms)Running coprocessor post-open hooks at 1731308557669 (+3 ms)Region opened successfully at 1731308557669 2024-11-11T07:02:37,670 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731308557645 2024-11-11T07:02:37,672 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T07:02:37,672 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T07:02:37,673 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=f334e8b0fbdf,37101,1731308556905 2024-11-11T07:02:37,674 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f334e8b0fbdf,37101,1731308556905, state=OPEN 2024-11-11T07:02:37,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37101-0x10105d5fcbb0001, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T07:02:37,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T07:02:37,680 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=f334e8b0fbdf,37101,1731308556905 2024-11-11T07:02:37,680 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T07:02:37,680 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T07:02:37,682 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T07:02:37,682 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=f334e8b0fbdf,37101,1731308556905 in 188 msec 2024-11-11T07:02:37,684 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T07:02:37,684 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 597 msec 2024-11-11T07:02:37,685 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T07:02:37,685 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T07:02:37,686 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T07:02:37,686 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f334e8b0fbdf,37101,1731308556905, seqNum=-1] 2024-11-11T07:02:37,686 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T07:02:37,687 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42335, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T07:02:37,692 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 639 msec 2024-11-11T07:02:37,692 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731308557692, completionTime=-1 2024-11-11T07:02:37,692 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T07:02:37,692 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-11T07:02:37,694 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-11T07:02:37,694 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731308617694 2024-11-11T07:02:37,694 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731308677694 2024-11-11T07:02:37,694 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-11T07:02:37,694 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,39247,1731308556856-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T07:02:37,694 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,39247,1731308556856-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:02:37,694 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,39247,1731308556856-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:02:37,695 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-f334e8b0fbdf:39247, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T07:02:37,695 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T07:02:37,695 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T07:02:37,696 DEBUG [master/f334e8b0fbdf:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T07:02:37,698 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.761sec 2024-11-11T07:02:37,698 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T07:02:37,698 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T07:02:37,699 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T07:02:37,699 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T07:02:37,699 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T07:02:37,699 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,39247,1731308556856-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T07:02:37,699 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,39247,1731308556856-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T07:02:37,701 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T07:02:37,701 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T07:02:37,701 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,39247,1731308556856-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T07:02:37,719 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7181dda1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T07:02:37,719 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request f334e8b0fbdf,39247,-1 for getting cluster id 2024-11-11T07:02:37,719 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T07:02:37,720 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '17852e31-c6c3-42df-ad32-0d0528119a50' 2024-11-11T07:02:37,720 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T07:02:37,720 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "17852e31-c6c3-42df-ad32-0d0528119a50" 2024-11-11T07:02:37,721 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ace19d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T07:02:37,721 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f334e8b0fbdf,39247,-1] 2024-11-11T07:02:37,721 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T07:02:37,721 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:02:37,722 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47356, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T07:02:37,722 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25f949b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T07:02:37,723 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T07:02:37,723 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f334e8b0fbdf,37101,1731308556905, seqNum=-1] 2024-11-11T07:02:37,724 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T07:02:37,724 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60726, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T07:02:37,726 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=f334e8b0fbdf,39247,1731308556856 2024-11-11T07:02:37,726 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:02:37,728 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T07:02:37,728 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T07:02:37,729 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is f334e8b0fbdf,39247,1731308556856 2024-11-11T07:02:37,729 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2195603a 2024-11-11T07:02:37,729 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T07:02:37,730 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47360, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T07:02:37,731 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39247 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-11T07:02:37,731 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39247 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-11T07:02:37,731 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39247 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T07:02:37,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39247 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-11T07:02:37,733 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T07:02:37,733 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:02:37,733 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39247 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-11T07:02:37,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39247 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T07:02:37,734 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T07:02:37,740 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741835_1011 (size=381) 2024-11-11T07:02:37,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741835_1011 (size=381) 2024-11-11T07:02:37,742 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1de601fba810c6b237c13985fef558b6, NAME => 'TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925 2024-11-11T07:02:37,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741836_1012 (size=64) 2024-11-11T07:02:37,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741836_1012 (size=64) 2024-11-11T07:02:37,748 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:02:37,749 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 1de601fba810c6b237c13985fef558b6, disabling compactions & flushes 2024-11-11T07:02:37,749 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6. 2024-11-11T07:02:37,749 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6. 2024-11-11T07:02:37,749 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6. after waiting 0 ms 2024-11-11T07:02:37,749 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6. 2024-11-11T07:02:37,749 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6. 
2024-11-11T07:02:37,749 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1de601fba810c6b237c13985fef558b6: Waiting for close lock at 1731308557748Disabling compacts and flushes for region at 1731308557748Disabling writes for close at 1731308557749 (+1 ms)Writing region close event to WAL at 1731308557749Closed at 1731308557749 2024-11-11T07:02:37,750 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T07:02:37,750 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731308557750"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731308557750"}]},"ts":"1731308557750"} 2024-11-11T07:02:37,752 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-11T07:02:37,753 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T07:02:37,754 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731308557754"}]},"ts":"1731308557754"} 2024-11-11T07:02:37,756 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-11T07:02:37,756 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1de601fba810c6b237c13985fef558b6, ASSIGN}] 2024-11-11T07:02:37,757 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1de601fba810c6b237c13985fef558b6, ASSIGN 2024-11-11T07:02:37,758 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1de601fba810c6b237c13985fef558b6, ASSIGN; state=OFFLINE, location=f334e8b0fbdf,37101,1731308556905; forceNewPlan=false, retain=false 2024-11-11T07:02:37,909 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1de601fba810c6b237c13985fef558b6, regionState=OPENING, regionLocation=f334e8b0fbdf,37101,1731308556905 2024-11-11T07:02:37,911 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1de601fba810c6b237c13985fef558b6, ASSIGN because future has completed 2024-11-11T07:02:37,912 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1de601fba810c6b237c13985fef558b6, 
server=f334e8b0fbdf,37101,1731308556905}] 2024-11-11T07:02:38,069 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6. 2024-11-11T07:02:38,069 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 1de601fba810c6b237c13985fef558b6, NAME => 'TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6.', STARTKEY => '', ENDKEY => ''} 2024-11-11T07:02:38,069 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:38,069 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:02:38,069 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:38,069 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:38,070 INFO [StoreOpener-1de601fba810c6b237c13985fef558b6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:38,072 INFO [StoreOpener-1de601fba810c6b237c13985fef558b6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1de601fba810c6b237c13985fef558b6 columnFamilyName info 2024-11-11T07:02:38,072 DEBUG [StoreOpener-1de601fba810c6b237c13985fef558b6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:02:38,072 INFO [StoreOpener-1de601fba810c6b237c13985fef558b6-1 {}] regionserver.HStore(327): Store=1de601fba810c6b237c13985fef558b6/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T07:02:38,072 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:38,073 DEBUG 
[RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:38,073 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:38,074 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:38,074 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:38,075 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:38,077 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T07:02:38,077 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 1de601fba810c6b237c13985fef558b6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=873302, jitterRate=0.11046193540096283}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T07:02:38,077 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:38,078 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 1de601fba810c6b237c13985fef558b6: Running coprocessor pre-open hook at 1731308558069Writing region info on filesystem at 1731308558069Initializing all the Stores at 1731308558070 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308558070Cleaning up temporary data from old regions at 1731308558074 (+4 ms)Running coprocessor post-open hooks at 1731308558077 (+3 ms)Region opened successfully at 1731308558078 (+1 ms) 2024-11-11T07:02:38,079 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6., pid=6, masterSystemTime=1731308558065 2024-11-11T07:02:38,081 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6. 2024-11-11T07:02:38,081 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6. 2024-11-11T07:02:38,082 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1de601fba810c6b237c13985fef558b6, regionState=OPEN, openSeqNum=2, regionLocation=f334e8b0fbdf,37101,1731308556905 2024-11-11T07:02:38,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1de601fba810c6b237c13985fef558b6, server=f334e8b0fbdf,37101,1731308556905 because future has completed 2024-11-11T07:02:38,087 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T07:02:38,087 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 1de601fba810c6b237c13985fef558b6, server=f334e8b0fbdf,37101,1731308556905 in 173 msec 2024-11-11T07:02:38,090 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T07:02:38,090 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1de601fba810c6b237c13985fef558b6, ASSIGN in 331 msec 2024-11-11T07:02:38,091 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T07:02:38,091 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731308558091"}]},"ts":"1731308558091"} 2024-11-11T07:02:38,093 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-11T07:02:38,094 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T07:02:38,096 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 363 msec 2024-11-11T07:02:38,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:38,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:38,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:39,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:39,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:39,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:40,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:40,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:40,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:40,696 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:40,696 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:40,696 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:40,696 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:40,696 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:40,696 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:40,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:40,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:40,711 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:40,712 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:40,712 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:40,712 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:40,712 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:40,712 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:40,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:40,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:40,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:40,716 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:41,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:41,221 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T07:02:41,222 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:41,222 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:41,222 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:41,223 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:41,223 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:41,223 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:41,224 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:41,224 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:41,253 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:41,253 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:41,253 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:41,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:41,253 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:41,254 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:41,254 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:41,257 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:41,257 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:41,257 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:41,260 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:41,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:02:41,605 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-11T07:02:41,605 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-11T07:02:41,606 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T07:02:42,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:42,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:42,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:43,147 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-11T07:02:43,148 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-11T07:02:43,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:43,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:43,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:44,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:44,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:44,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:45,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:45,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:45,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:46,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:46,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:46,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:47,108 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T07:02:47,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:47,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:47,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:47,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:47,110 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:47,110 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:47,110 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:47,110 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:47,131 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:47,131 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:47,131 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:47,132 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:47,132 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:47,132 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:47,135 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:47,135 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:47,136 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:47,139 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:47,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:02:47,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:47,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:47,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39247 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T07:02:47,776 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-11T07:02:47,776 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-11T07:02:47,779 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-11T07:02:47,779 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6. 
2024-11-11T07:02:47,781 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6., hostname=f334e8b0fbdf,37101,1731308556905, seqNum=2] 2024-11-11T07:02:47,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37101 {}] regionserver.HRegion(8855): Flush requested on 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:47,793 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1de601fba810c6b237c13985fef558b6 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T07:02:47,809 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/79ce210bc27949f980162dddeef23f7f is 1080, key is row0001/info:/1731308567782/Put/seqid=0 2024-11-11T07:02:47,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741837_1013 (size=12509) 2024-11-11T07:02:47,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741837_1013 (size=12509) 2024-11-11T07:02:47,816 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/79ce210bc27949f980162dddeef23f7f 2024-11-11T07:02:47,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/79ce210bc27949f980162dddeef23f7f as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/79ce210bc27949f980162dddeef23f7f 2024-11-11T07:02:47,829 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/79ce210bc27949f980162dddeef23f7f, entries=7, sequenceid=11, filesize=12.2 K 2024-11-11T07:02:47,829 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 1de601fba810c6b237c13985fef558b6 in 37ms, sequenceid=11, compaction requested=false 2024-11-11T07:02:47,829 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1de601fba810c6b237c13985fef558b6: 2024-11-11T07:02:47,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37101 {}] regionserver.HRegion(8855): Flush requested on 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:47,830 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1de601fba810c6b237c13985fef558b6 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-11T07:02:47,834 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/2d814cb30f904f2394235029fb2f0f12 is 1080, key is row0008/info:/1731308567793/Put/seqid=0 2024-11-11T07:02:47,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741838_1014 (size=29761) 2024-11-11T07:02:47,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741838_1014 (size=29761) 2024-11-11T07:02:47,840 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/2d814cb30f904f2394235029fb2f0f12 2024-11-11T07:02:47,845 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/2d814cb30f904f2394235029fb2f0f12 as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/2d814cb30f904f2394235029fb2f0f12 2024-11-11T07:02:47,849 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/2d814cb30f904f2394235029fb2f0f12, entries=23, sequenceid=37, filesize=29.1 K 2024-11-11T07:02:47,850 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 1de601fba810c6b237c13985fef558b6 in 20ms, sequenceid=37, compaction requested=false 2024-11-11T07:02:47,850 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1de601fba810c6b237c13985fef558b6: 2024-11-11T07:02:47,850 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-11T07:02:47,850 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T07:02:47,850 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/2d814cb30f904f2394235029fb2f0f12 because midkey is the same as first or last row 2024-11-11T07:02:48,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:48,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:48,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:49,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:49,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:49,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:02:49,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37101 {}] regionserver.HRegion(8855): Flush requested on 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:49,843 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1de601fba810c6b237c13985fef558b6 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T07:02:49,847 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/79f0a91d3b404a7d86da1f9c7c9a6f8b is 1080, key is row0031/info:/1731308567831/Put/seqid=0 2024-11-11T07:02:49,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741839_1015 (size=12509) 2024-11-11T07:02:49,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741839_1015 (size=12509) 2024-11-11T07:02:49,853 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/79f0a91d3b404a7d86da1f9c7c9a6f8b 2024-11-11T07:02:49,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/79f0a91d3b404a7d86da1f9c7c9a6f8b as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/79f0a91d3b404a7d86da1f9c7c9a6f8b 2024-11-11T07:02:49,865 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/79f0a91d3b404a7d86da1f9c7c9a6f8b, entries=7, sequenceid=47, filesize=12.2 K 2024-11-11T07:02:49,866 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 1de601fba810c6b237c13985fef558b6 in 23ms, sequenceid=47, compaction requested=true 2024-11-11T07:02:49,866 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1de601fba810c6b237c13985fef558b6: 2024-11-11T07:02:49,866 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-11T07:02:49,866 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T07:02:49,866 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/2d814cb30f904f2394235029fb2f0f12 because midkey is the same as first or last row 2024-11-11T07:02:49,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1de601fba810c6b237c13985fef558b6:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-11T07:02:49,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T07:02:49,866 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T07:02:49,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37101 {}] regionserver.HRegion(8855): Flush requested on 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:49,867 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1de601fba810c6b237c13985fef558b6 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-11T07:02:49,867 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T07:02:49,867 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1541): 1de601fba810c6b237c13985fef558b6/info is initiating minor compaction (all files) 2024-11-11T07:02:49,867 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1de601fba810c6b237c13985fef558b6/info in TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6. 2024-11-11T07:02:49,868 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/79ce210bc27949f980162dddeef23f7f, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/2d814cb30f904f2394235029fb2f0f12, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/79f0a91d3b404a7d86da1f9c7c9a6f8b] into tmpdir=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp, totalSize=53.5 K 2024-11-11T07:02:49,868 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting 79ce210bc27949f980162dddeef23f7f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731308567782 2024-11-11T07:02:49,869 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2d814cb30f904f2394235029fb2f0f12, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1731308567793 2024-11-11T07:02:49,869 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting 79f0a91d3b404a7d86da1f9c7c9a6f8b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731308567831 2024-11-11T07:02:49,872 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/a47842ca56984cccb52e25e63885f0eb is 1080, key is row0038/info:/1731308569844/Put/seqid=0 
2024-11-11T07:02:49,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741840_1016 (size=21141) 2024-11-11T07:02:49,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741840_1016 (size=21141) 2024-11-11T07:02:49,878 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/a47842ca56984cccb52e25e63885f0eb 2024-11-11T07:02:49,885 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/a47842ca56984cccb52e25e63885f0eb as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/a47842ca56984cccb52e25e63885f0eb 2024-11-11T07:02:49,888 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1de601fba810c6b237c13985fef558b6#info#compaction#57 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T07:02:49,889 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/47b697b3af6d41a797ab28d3daedb94d is 1080, key is row0001/info:/1731308567782/Put/seqid=0 2024-11-11T07:02:49,891 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/a47842ca56984cccb52e25e63885f0eb, entries=15, sequenceid=65, filesize=20.6 K 2024-11-11T07:02:49,891 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=12.61 KB/12912 for 1de601fba810c6b237c13985fef558b6 in 24ms, sequenceid=65, compaction requested=false 2024-11-11T07:02:49,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1de601fba810c6b237c13985fef558b6: 2024-11-11T07:02:49,892 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=74.1 K, sizeToCheck=16.0 K 2024-11-11T07:02:49,892 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T07:02:49,892 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/2d814cb30f904f2394235029fb2f0f12 because midkey is the same as first or last row 2024-11-11T07:02:49,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741841_1017 (size=44978) 2024-11-11T07:02:49,898 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741841_1017 (size=44978) 2024-11-11T07:02:49,903 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/47b697b3af6d41a797ab28d3daedb94d as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/47b697b3af6d41a797ab28d3daedb94d 2024-11-11T07:02:49,909 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1de601fba810c6b237c13985fef558b6/info of 1de601fba810c6b237c13985fef558b6 into 47b697b3af6d41a797ab28d3daedb94d(size=43.9 K), total size for store is 64.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T07:02:49,909 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1de601fba810c6b237c13985fef558b6: 2024-11-11T07:02:49,909 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6., storeName=1de601fba810c6b237c13985fef558b6/info, priority=13, startTime=1731308569866; duration=0sec 2024-11-11T07:02:49,909 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.6 K, sizeToCheck=16.0 K 2024-11-11T07:02:49,909 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T07:02:49,909 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/47b697b3af6d41a797ab28d3daedb94d because midkey is the same as first or last row 2024-11-11T07:02:49,909 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.6 K, sizeToCheck=16.0 K 2024-11-11T07:02:49,909 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T07:02:49,909 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/47b697b3af6d41a797ab28d3daedb94d because midkey is the same as first or last row 2024-11-11T07:02:49,909 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.6 K, sizeToCheck=16.0 K 2024-11-11T07:02:49,909 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T07:02:49,910 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/47b697b3af6d41a797ab28d3daedb94d because midkey is the same as first or last row 2024-11-11T07:02:49,910 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T07:02:49,910 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1de601fba810c6b237c13985fef558b6:info 2024-11-11T07:02:50,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:50,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:50,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:51,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:51,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:51,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:51,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37101 {}] regionserver.HRegion(8855): Flush requested on 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:51,893 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1de601fba810c6b237c13985fef558b6 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-11T07:02:51,898 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/67fe8a2a0f814617bf0be3ee7ae700b3 is 1080, key is row0053/info:/1731308569868/Put/seqid=0 2024-11-11T07:02:51,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741842_1018 (size=18987) 2024-11-11T07:02:51,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741842_1018 (size=18987) 2024-11-11T07:02:51,903 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/67fe8a2a0f814617bf0be3ee7ae700b3 2024-11-11T07:02:51,909 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/67fe8a2a0f814617bf0be3ee7ae700b3 as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/67fe8a2a0f814617bf0be3ee7ae700b3 2024-11-11T07:02:51,914 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/67fe8a2a0f814617bf0be3ee7ae700b3, entries=13, sequenceid=82, filesize=18.5 K 2024-11-11T07:02:51,915 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=12.61 KB/12912 for 1de601fba810c6b237c13985fef558b6 in 22ms, sequenceid=82, compaction requested=true 2024-11-11T07:02:51,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1de601fba810c6b237c13985fef558b6: 2024-11-11T07:02:51,915 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-11-11T07:02:51,915 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T07:02:51,915 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/47b697b3af6d41a797ab28d3daedb94d because midkey is the same as first or last row 2024-11-11T07:02:51,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1de601fba810c6b237c13985fef558b6:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T07:02:51,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T07:02:51,916 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T07:02:51,917 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T07:02:51,917 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1541): 1de601fba810c6b237c13985fef558b6/info is initiating minor compaction (all files) 2024-11-11T07:02:51,917 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1de601fba810c6b237c13985fef558b6/info in TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6. 2024-11-11T07:02:51,917 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/47b697b3af6d41a797ab28d3daedb94d, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/a47842ca56984cccb52e25e63885f0eb, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/67fe8a2a0f814617bf0be3ee7ae700b3] into tmpdir=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp, totalSize=83.1 K 2024-11-11T07:02:51,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37101 {}] regionserver.HRegion(8855): Flush requested on 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:51,917 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1de601fba810c6b237c13985fef558b6 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-11T07:02:51,917 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting 47b697b3af6d41a797ab28d3daedb94d, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731308567782 2024-11-11T07:02:51,918 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting a47842ca56984cccb52e25e63885f0eb, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=65, earliestPutTs=1731308569844 2024-11-11T07:02:51,918 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting 
67fe8a2a0f814617bf0be3ee7ae700b3, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731308569868 2024-11-11T07:02:51,922 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/367ecfe6f29c4488abc1cf09f88a0952 is 1080, key is row0066/info:/1731308571894/Put/seqid=0 2024-11-11T07:02:51,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741843_1019 (size=20064) 2024-11-11T07:02:51,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741843_1019 (size=20064) 2024-11-11T07:02:51,930 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/367ecfe6f29c4488abc1cf09f88a0952 2024-11-11T07:02:51,936 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1de601fba810c6b237c13985fef558b6#info#compaction#60 average throughput is 22.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T07:02:51,937 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/3b8e533f3bc4496a817810e5c29c8c06 is 1080, key is row0001/info:/1731308567782/Put/seqid=0 2024-11-11T07:02:51,937 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/367ecfe6f29c4488abc1cf09f88a0952 as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/367ecfe6f29c4488abc1cf09f88a0952 2024-11-11T07:02:51,944 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/367ecfe6f29c4488abc1cf09f88a0952, entries=14, sequenceid=99, filesize=19.6 K 2024-11-11T07:02:51,945 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=14.71 KB/15064 for 1de601fba810c6b237c13985fef558b6 in 27ms, sequenceid=99, compaction requested=false 2024-11-11T07:02:51,945 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1de601fba810c6b237c13985fef558b6: 2024-11-11T07:02:51,945 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.7 K, sizeToCheck=16.0 K 2024-11-11T07:02:51,945 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T07:02:51,945 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/47b697b3af6d41a797ab28d3daedb94d because midkey is the same as first or last row 2024-11-11T07:02:51,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37101 {}] regionserver.HRegion(8855): Flush requested on 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:51,946 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1de601fba810c6b237c13985fef558b6 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-11T07:02:51,950 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/f4b31e7bf7084deeb648e45b30bdd138 is 1080, key is row0080/info:/1731308571918/Put/seqid=0 2024-11-11T07:02:51,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741844_1020 (size=75378) 2024-11-11T07:02:51,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741844_1020 (size=75378) 2024-11-11T07:02:51,961 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/3b8e533f3bc4496a817810e5c29c8c06 as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/3b8e533f3bc4496a817810e5c29c8c06 2024-11-11T07:02:51,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741845_1021 (size=21141) 2024-11-11T07:02:51,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741845_1021 (size=21141) 2024-11-11T07:02:51,963 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/f4b31e7bf7084deeb648e45b30bdd138 2024-11-11T07:02:51,968 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1de601fba810c6b237c13985fef558b6/info of 1de601fba810c6b237c13985fef558b6 into 3b8e533f3bc4496a817810e5c29c8c06(size=73.6 K), total size for store is 93.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
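
The repeated "Should split because region size is big enough sumSize=..., sizeToCheck=16.0 K" lines above come from the region split policy. Judging from the parameters printed when the daughter region opens later in this log (initialSize=16384, desiredMaxFileSize=814101), the IncreasingToUpperBoundRegionSplitPolicy threshold is min(desiredMaxFileSize, initialSize * regionCount^3). The following is a minimal sketch of that check, using only values visible in this log; it is illustrative and not the HBase class itself.

    // Sketch of the size check reflected by the "Should split ... sumSize=..., sizeToCheck=..." lines.
    // Constants are the values printed elsewhere in this log; the real logic lives in
    // IncreasingToUpperBoundRegionSplitPolicy / ConstantSizeRegionSplitPolicy.
    public final class SplitCheckSketch {
        static final long INITIAL_SIZE = 16_384L;            // initialSize from the policy dump
        static final long DESIRED_MAX_FILE_SIZE = 814_101L;  // desiredMaxFileSize (jittered) from the policy dump

        // Threshold grows with the cube of how many regions of this table the server hosts,
        // capped by the desired max file size.
        static long sizeToCheck(int regionsWithCommonTable) {
            long grown = INITIAL_SIZE * regionsWithCommonTable * regionsWithCommonTable * regionsWithCommonTable;
            return Math.min(DESIRED_MAX_FILE_SIZE, grown);
        }

        public static void main(String[] args) {
            long sumSize = 95_442L;          // 93.2 K: the store size reported after the compaction above
            int regionsWithCommonTable = 1;  // as logged
            long threshold = sizeToCheck(regionsWithCommonTable); // 16384 bytes = 16.0 K
            System.out.printf("sumSize=%.1f K, sizeToCheck=%.1f K, shouldSplit=%b%n",
                sumSize / 1024.0, threshold / 1024.0, sumSize > threshold);
        }
    }

With only one region of the table on the server the threshold is just 16 K, which is why the 93.2 K (and later 113.9 K) store keeps tripping the check. The earlier refusals ("midkey is the same as first or last row") were evaluated against the pre-compaction file 47b697b3...; once the compaction above produces 3b8e533f..., the split request with splitKey=row0062 is accepted below.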
2024-11-11T07:02:51,968 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1de601fba810c6b237c13985fef558b6: 2024-11-11T07:02:51,968 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6., storeName=1de601fba810c6b237c13985fef558b6/info, priority=13, startTime=1731308571915; duration=0sec 2024-11-11T07:02:51,969 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-11-11T07:02:51,969 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T07:02:51,969 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-11-11T07:02:51,969 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T07:02:51,969 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-11-11T07:02:51,969 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T07:02:51,969 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/f4b31e7bf7084deeb648e45b30bdd138 as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/f4b31e7bf7084deeb648e45b30bdd138 2024-11-11T07:02:51,970 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T07:02:51,970 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T07:02:51,970 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1de601fba810c6b237c13985fef558b6:info 2024-11-11T07:02:51,972 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39247 {}] assignment.AssignmentManager(1355): Split request from f334e8b0fbdf,37101,1731308556905, parent={ENCODED => 1de601fba810c6b237c13985fef558b6, NAME => 'TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-11T07:02:51,974 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/f4b31e7bf7084deeb648e45b30bdd138, entries=15, sequenceid=117, filesize=20.6 K 2024-11-11T07:02:51,976 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=2.10 KB/2152 for 1de601fba810c6b237c13985fef558b6 in 29ms, sequenceid=117, compaction requested=true 2024-11-11T07:02:51,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1de601fba810c6b237c13985fef558b6: 2024-11-11T07:02:51,976 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=113.9 K, sizeToCheck=16.0 K 2024-11-11T07:02:51,976 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T07:02:51,976 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=113.9 K, sizeToCheck=16.0 K 2024-11-11T07:02:51,976 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T07:02:51,976 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=113.9 K, sizeToCheck=16.0 K 2024-11-11T07:02:51,976 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T07:02:51,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=1 2024-11-11T07:02:51,979 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39247 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=f334e8b0fbdf,37101,1731308556905 2024-11-11T07:02:51,983 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39247 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1de601fba810c6b237c13985fef558b6, daughterA=b6429c4a74a02857045bfc2e68f9de65, daughterB=b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:02:51,985 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1de601fba810c6b237c13985fef558b6, daughterA=b6429c4a74a02857045bfc2e68f9de65, daughterB=b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:02:51,985 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1de601fba810c6b237c13985fef558b6, daughterA=b6429c4a74a02857045bfc2e68f9de65, daughterB=b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:02:51,985 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1de601fba810c6b237c13985fef558b6, daughterA=b6429c4a74a02857045bfc2e68f9de65, daughterB=b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:02:51,986 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39247 {}] assignment.AssignmentManager(1355): Split request from f334e8b0fbdf,37101,1731308556905, parent={ENCODED => 1de601fba810c6b237c13985fef558b6, NAME => 
'TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-11T07:02:51,987 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39247 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=SPLITTING, location=f334e8b0fbdf,37101,1731308556905 2024-11-11T07:02:51,988 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39247 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1de601fba810c6b237c13985fef558b6, daughterA=dfbef0c388c8404d22dd464a8289e896, daughterB=d0c97780b9c0dbf99577cedd098a59e4 2024-11-11T07:02:51,988 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(842): Waiting on xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1de601fba810c6b237c13985fef558b6, daughterA=dfbef0c388c8404d22dd464a8289e896, daughterB=d0c97780b9c0dbf99577cedd098a59e4 held by pid=7 2024-11-11T07:02:51,993 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1de601fba810c6b237c13985fef558b6, UNASSIGN}] 2024-11-11T07:02:51,997 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(162): LOCK_EVENT_WAIT SchemaLocking[serverLocks={},namespaceLocks={hbase=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},tableLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},regionLocks={},peerLocks={},metaLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},globalLocks={}] 2024-11-11T07:02:51,997 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1de601fba810c6b237c13985fef558b6, UNASSIGN 2024-11-11T07:02:51,997 DEBUG [PEWorker-3 {}] procedure2.ProcedureExecutor(1511): LOCK_EVENT_WAIT pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1de601fba810c6b237c13985fef558b6, daughterA=dfbef0c388c8404d22dd464a8289e896, daughterB=d0c97780b9c0dbf99577cedd098a59e4 2024-11-11T07:02:51,999 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=1de601fba810c6b237c13985fef558b6, regionState=CLOSING, regionLocation=f334e8b0fbdf,37101,1731308556905 2024-11-11T07:02:52,002 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1de601fba810c6b237c13985fef558b6, UNASSIGN because future has completed 2024-11-11T07:02:52,002 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-11T07:02:52,003 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1de601fba810c6b237c13985fef558b6, server=f334e8b0fbdf,37101,1731308556905}] 2024-11-11T07:02:52,161 INFO 
[RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(122): Close 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:52,161 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-11T07:02:52,162 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1722): Closing 1de601fba810c6b237c13985fef558b6, disabling compactions & flushes 2024-11-11T07:02:52,162 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6. 2024-11-11T07:02:52,162 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6. 2024-11-11T07:02:52,162 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6. after waiting 0 ms 2024-11-11T07:02:52,162 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6. 2024-11-11T07:02:52,162 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(2902): Flushing 1de601fba810c6b237c13985fef558b6 1/1 column families, dataSize=2.10 KB heapSize=2.50 KB 2024-11-11T07:02:52,166 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/960764f342ec4e868e11f7dfe500cf9a is 1080, key is row0095/info:/1731308571947/Put/seqid=0 2024-11-11T07:02:52,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741846_1022 (size=7112) 2024-11-11T07:02:52,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741846_1022 (size=7112) 2024-11-11T07:02:52,177 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.10 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/960764f342ec4e868e11f7dfe500cf9a 2024-11-11T07:02:52,183 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/.tmp/info/960764f342ec4e868e11f7dfe500cf9a as 
hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/960764f342ec4e868e11f7dfe500cf9a 2024-11-11T07:02:52,188 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/960764f342ec4e868e11f7dfe500cf9a, entries=2, sequenceid=123, filesize=6.9 K 2024-11-11T07:02:52,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:02:52,189 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for 1de601fba810c6b237c13985fef558b6 in 27ms, sequenceid=123, compaction requested=true 2024-11-11T07:02:52,190 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/79ce210bc27949f980162dddeef23f7f, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/2d814cb30f904f2394235029fb2f0f12, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/47b697b3af6d41a797ab28d3daedb94d, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/79f0a91d3b404a7d86da1f9c7c9a6f8b, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/a47842ca56984cccb52e25e63885f0eb, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/67fe8a2a0f814617bf0be3ee7ae700b3] to archive 2024-11-11T07:02:52,190 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
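
The StoreCloser entries that follow move each compacted store file from the region's data directory to the matching location under archive/. A rough sketch of that path rewrite, assuming only the directory layout visible in these lines (root/data/namespace/table/region/cf/hfile mapped to root/archive/data/...); the actual logic is in backup.HFileArchiver.

    // Sketch of the data -> archive path mapping seen in the HFileArchiver lines below.
    // Assumes the layout visible in this log: <root>/data/<ns>/<table>/<region>/<cf>/<hfile>.
    public final class ArchivePathSketch {
        static String toArchivePath(String storeFilePath, String rootDir) {
            String dataPrefix = rootDir + "/data/";
            if (!storeFilePath.startsWith(dataPrefix)) {
                throw new IllegalArgumentException("not under " + dataPrefix);
            }
            return rootDir + "/archive/data/" + storeFilePath.substring(dataPrefix.length());
        }

        public static void main(String[] args) {
            String root = "hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925";
            String src = root + "/data/default/TestLogRolling-testLogRolling/"
                + "1de601fba810c6b237c13985fef558b6/info/79ce210bc27949f980162dddeef23f7f";
            // Matches the "Archived from FileableStoreFile ... to ..." target printed below.
            System.out.println(toArchivePath(src, root));
        }
    }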
2024-11-11T07:02:52,192 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/79ce210bc27949f980162dddeef23f7f to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/79ce210bc27949f980162dddeef23f7f 2024-11-11T07:02:52,193 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/2d814cb30f904f2394235029fb2f0f12 to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/2d814cb30f904f2394235029fb2f0f12 2024-11-11T07:02:52,194 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/47b697b3af6d41a797ab28d3daedb94d to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/47b697b3af6d41a797ab28d3daedb94d 2024-11-11T07:02:52,195 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/79f0a91d3b404a7d86da1f9c7c9a6f8b to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/79f0a91d3b404a7d86da1f9c7c9a6f8b 2024-11-11T07:02:52,196 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/a47842ca56984cccb52e25e63885f0eb to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/a47842ca56984cccb52e25e63885f0eb 2024-11-11T07:02:52,197 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/67fe8a2a0f814617bf0be3ee7ae700b3 to 
hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/67fe8a2a0f814617bf0be3ee7ae700b3 2024-11-11T07:02:52,203 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-11-11T07:02:52,203 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6. 2024-11-11T07:02:52,203 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1676): Region close journal for 1de601fba810c6b237c13985fef558b6: Waiting for close lock at 1731308572162Running coprocessor pre-close hooks at 1731308572162Disabling compacts and flushes for region at 1731308572162Disabling writes for close at 1731308572162Obtaining lock to block concurrent updates at 1731308572162Preparing flush snapshotting stores in 1de601fba810c6b237c13985fef558b6 at 1731308572162Finished memstore snapshotting TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6., syncing WAL and waiting on mvcc, flushsize=dataSize=2152, getHeapSize=2544, getOffHeapSize=0, getCellsCount=2 at 1731308572162Flushing stores of TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6. at 1731308572163 (+1 ms)Flushing 1de601fba810c6b237c13985fef558b6/info: creating writer at 1731308572163Flushing 1de601fba810c6b237c13985fef558b6/info: appending metadata at 1731308572166 (+3 ms)Flushing 1de601fba810c6b237c13985fef558b6/info: closing flushed file at 1731308572166Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2d8847e5: reopening flushed file at 1731308572182 (+16 ms)Finished flush of dataSize ~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for 1de601fba810c6b237c13985fef558b6 in 27ms, sequenceid=123, compaction requested=true at 1731308572189 (+7 ms)Writing region close event to WAL at 1731308572200 (+11 ms)Running coprocessor post-close hooks at 1731308572203 (+3 ms)Closed at 1731308572203 2024-11-11T07:02:52,205 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(157): Closed 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:52,206 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=1de601fba810c6b237c13985fef558b6, regionState=CLOSED 2024-11-11T07:02:52,208 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1de601fba810c6b237c13985fef558b6, server=f334e8b0fbdf,37101,1731308556905 because future has completed 2024-11-11T07:02:52,211 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-11T07:02:52,211 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; CloseRegionProcedure 1de601fba810c6b237c13985fef558b6, server=f334e8b0fbdf,37101,1731308556905 in 206 msec 2024-11-11T07:02:52,213 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-11-11T07:02:52,213 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1de601fba810c6b237c13985fef558b6, UNASSIGN in 218 msec 2024-11-11T07:02:52,221 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:02:52,224 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=1de601fba810c6b237c13985fef558b6, threads=4 2024-11-11T07:02:52,227 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/367ecfe6f29c4488abc1cf09f88a0952 for region: 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:52,227 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/960764f342ec4e868e11f7dfe500cf9a for region: 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:52,227 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/f4b31e7bf7084deeb648e45b30bdd138 for region: 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:52,227 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/3b8e533f3bc4496a817810e5c29c8c06 for region: 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:52,238 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/f4b31e7bf7084deeb648e45b30bdd138, top=true 2024-11-11T07:02:52,239 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/960764f342ec4e868e11f7dfe500cf9a, top=true 2024-11-11T07:02:52,241 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/367ecfe6f29c4488abc1cf09f88a0952, top=true 2024-11-11T07:02:52,246 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created 
linkFile:hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-960764f342ec4e868e11f7dfe500cf9a for child: b88c1b6447f023a04f2710e15f1fe9de, parent: 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:52,246 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/960764f342ec4e868e11f7dfe500cf9a for region: 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:52,253 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-f4b31e7bf7084deeb648e45b30bdd138 for child: b88c1b6447f023a04f2710e15f1fe9de, parent: 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:52,253 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/f4b31e7bf7084deeb648e45b30bdd138 for region: 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:52,254 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-367ecfe6f29c4488abc1cf09f88a0952 for child: b88c1b6447f023a04f2710e15f1fe9de, parent: 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:52,254 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/367ecfe6f29c4488abc1cf09f88a0952 for region: 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:52,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741847_1023 (size=27) 2024-11-11T07:02:52,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741847_1023 (size=27) 2024-11-11T07:02:52,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:52,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741848_1024 (size=27) 2024-11-11T07:02:52,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741848_1024 (size=27) 2024-11-11T07:02:52,268 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/3b8e533f3bc4496a817810e5c29c8c06 for region: 1de601fba810c6b237c13985fef558b6 2024-11-11T07:02:52,270 DEBUG [PEWorker-5 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 1de601fba810c6b237c13985fef558b6 Daughter A: [hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b6429c4a74a02857045bfc2e68f9de65/info/3b8e533f3bc4496a817810e5c29c8c06.1de601fba810c6b237c13985fef558b6] storefiles, Daughter B: [hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-367ecfe6f29c4488abc1cf09f88a0952, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/3b8e533f3bc4496a817810e5c29c8c06.1de601fba810c6b237c13985fef558b6, 
hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-960764f342ec4e868e11f7dfe500cf9a, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-f4b31e7bf7084deeb648e45b30bdd138] storefiles. 2024-11-11T07:02:52,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:02:52,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741849_1025 (size=71) 2024-11-11T07:02:52,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741849_1025 (size=71) 2024-11-11T07:02:52,279 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:02:52,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741850_1026 (size=71) 2024-11-11T07:02:52,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741850_1026 (size=71) 2024-11-11T07:02:52,291 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:02:52,302 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b6429c4a74a02857045bfc2e68f9de65/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-11T07:02:52,304 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-11T07:02:52,307 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731308572307"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731308572307"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731308572307"}]},"ts":"1731308572307"} 2024-11-11T07:02:52,308 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731308571979.b6429c4a74a02857045bfc2e68f9de65.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731308572307"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731308572307"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731308572307"}]},"ts":"1731308572307"} 2024-11-11T07:02:52,308 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731308572307"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731308572307"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731308572307"}]},"ts":"1731308572307"} 2024-11-11T07:02:52,329 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b6429c4a74a02857045bfc2e68f9de65, ASSIGN}, {pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=b88c1b6447f023a04f2710e15f1fe9de, ASSIGN}] 2024-11-11T07:02:52,331 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b88c1b6447f023a04f2710e15f1fe9de, ASSIGN 2024-11-11T07:02:52,331 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b6429c4a74a02857045bfc2e68f9de65, ASSIGN 2024-11-11T07:02:52,332 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b88c1b6447f023a04f2710e15f1fe9de, ASSIGN; state=SPLITTING_NEW, location=f334e8b0fbdf,37101,1731308556905; forceNewPlan=false, retain=false 2024-11-11T07:02:52,332 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b6429c4a74a02857045bfc2e68f9de65, ASSIGN; state=SPLITTING_NEW, location=f334e8b0fbdf,37101,1731308556905; forceNewPlan=false, retain=false 2024-11-11T07:02:52,482 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=b88c1b6447f023a04f2710e15f1fe9de, regionState=OPENING, regionLocation=f334e8b0fbdf,37101,1731308556905 2024-11-11T07:02:52,482 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=b6429c4a74a02857045bfc2e68f9de65, regionState=OPENING, regionLocation=f334e8b0fbdf,37101,1731308556905 2024-11-11T07:02:52,485 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b88c1b6447f023a04f2710e15f1fe9de, ASSIGN because future has completed 2024-11-11T07:02:52,485 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure b88c1b6447f023a04f2710e15f1fe9de, server=f334e8b0fbdf,37101,1731308556905}] 2024-11-11T07:02:52,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b6429c4a74a02857045bfc2e68f9de65, ASSIGN because future has completed 2024-11-11T07:02:52,487 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure b6429c4a74a02857045bfc2e68f9de65, server=f334e8b0fbdf,37101,1731308556905}] 2024-11-11T07:02:52,641 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de. 
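
No data is rewritten at split time. Daughter A (b6429c4a..., keys below row0062) gets a Reference named <hfile>.<parentEncodedRegion> pointing at the bottom half of the parent's compacted file, daughter B gets the matching top-half reference (the "-top" suffix in the StoreEngine load lines that follow), and the three flush files whose keys all sort above row0062 are exposed to daughter B through HFileLink names of the form <table>=<parentEncodedRegion>-<hfile> (the "Will create HFileLink ... top=true" and "Created linkFile:" entries above). Below is a string-level sketch that decodes the two name shapes exactly as they appear in this log; it is not the HBase Reference/HFileLink API.

    // Decodes the two store-file name shapes visible in this split:
    //   reference:  <hfile>.<parentEncodedRegion>
    //   hfile link: <table>=<parentEncodedRegion>-<hfile>
    // Purely illustrative string handling.
    public final class SplitFileNameSketch {
        public static void main(String[] args) {
            String reference = "3b8e533f3bc4496a817810e5c29c8c06.1de601fba810c6b237c13985fef558b6";
            int dot = reference.lastIndexOf('.');
            System.out.println("reference -> hfile=" + reference.substring(0, dot)
                + ", parentRegion=" + reference.substring(dot + 1));

            String link = "TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-f4b31e7bf7084deeb648e45b30bdd138";
            int eq = link.indexOf('=');
            int dash = link.lastIndexOf('-');  // hex file and region names contain no '-'
            System.out.println("link -> table=" + link.substring(0, eq)
                + ", parentRegion=" + link.substring(eq + 1, dash)
                + ", hfile=" + link.substring(dash + 1));
        }
    }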
2024-11-11T07:02:52,641 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => b88c1b6447f023a04f2710e15f1fe9de, NAME => 'TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-11T07:02:52,642 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:02:52,642 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:02:52,642 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:02:52,642 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:02:52,643 INFO [StoreOpener-b88c1b6447f023a04f2710e15f1fe9de-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:02:52,644 INFO [StoreOpener-b88c1b6447f023a04f2710e15f1fe9de-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b88c1b6447f023a04f2710e15f1fe9de columnFamilyName info 2024-11-11T07:02:52,644 DEBUG [StoreOpener-b88c1b6447f023a04f2710e15f1fe9de-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:02:52,655 DEBUG [StoreOpener-b88c1b6447f023a04f2710e15f1fe9de-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/3b8e533f3bc4496a817810e5c29c8c06.1de601fba810c6b237c13985fef558b6->hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/3b8e533f3bc4496a817810e5c29c8c06-top 2024-11-11T07:02:52,659 DEBUG [StoreOpener-b88c1b6447f023a04f2710e15f1fe9de-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-367ecfe6f29c4488abc1cf09f88a0952 2024-11-11T07:02:52,663 DEBUG [StoreOpener-b88c1b6447f023a04f2710e15f1fe9de-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-960764f342ec4e868e11f7dfe500cf9a 2024-11-11T07:02:52,666 DEBUG [StoreOpener-b88c1b6447f023a04f2710e15f1fe9de-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-f4b31e7bf7084deeb648e45b30bdd138 2024-11-11T07:02:52,667 INFO [StoreOpener-b88c1b6447f023a04f2710e15f1fe9de-1 {}] regionserver.HStore(327): Store=b88c1b6447f023a04f2710e15f1fe9de/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T07:02:52,667 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:02:52,667 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:02:52,668 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:02:52,669 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:02:52,669 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:02:52,671 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:02:52,671 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened b88c1b6447f023a04f2710e15f1fe9de; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=814101, jitterRate=0.03518398106098175}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T07:02:52,671 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:02:52,672 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] 
regionserver.HRegion(1006): Region open journal for b88c1b6447f023a04f2710e15f1fe9de: Running coprocessor pre-open hook at 1731308572642Writing region info on filesystem at 1731308572642Initializing all the Stores at 1731308572643 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308572643Cleaning up temporary data from old regions at 1731308572669 (+26 ms)Running coprocessor post-open hooks at 1731308572671 (+2 ms)Region opened successfully at 1731308572672 (+1 ms) 2024-11-11T07:02:52,673 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de., pid=13, masterSystemTime=1731308572638 2024-11-11T07:02:52,673 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store b88c1b6447f023a04f2710e15f1fe9de:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T07:02:52,673 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T07:02:52,673 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T07:02:52,675 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de. 2024-11-11T07:02:52,675 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1541): b88c1b6447f023a04f2710e15f1fe9de/info is initiating minor compaction (all files) 2024-11-11T07:02:52,675 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b88c1b6447f023a04f2710e15f1fe9de/info in TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de. 
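
The "Selecting compaction from ... store files" and the earlier "Exploring compaction algorithm has selected 3 files of size 85106 ... with 1 in ratio" lines come from ExploringCompactionPolicy. Per the configuration dump printed when the daughter store opened (minFilesToCompact=3, maxFilesToCompact=10, ratio 1.2), a run of files is "in ratio" only if every file is no larger than ratio times the combined size of the other files in the run. A simplified sketch of that test follows; the real policy also scores permutations and handles off-peak and blocking cases, and the daughter compaction initiated just above is an "(all files)" compaction of the freshly split references rather than a ratio-selected one.

    import java.util.List;

    // Simplified version of the "in ratio" test behind the ExploringCompactionPolicy
    // lines in this log (ratio 1.2, minFiles 3). Not the full selection algorithm.
    public final class RatioCheckSketch {
        static boolean filesInRatio(List<Long> sizes, double ratio) {
            long total = sizes.stream().mapToLong(Long::longValue).sum();
            // Every file must be <= ratio * (sum of the other files in the run).
            return sizes.stream().allMatch(s -> s <= ratio * (total - s));
        }

        public static void main(String[] args) {
            // Approximate byte sizes reconstructed from the logged 43.9 K / 20.6 K / 18.5 K
            // figures for 47b697b3..., a47842ca..., 67fe8a2a...; the logged total was 85106 bytes.
            List<Long> run = List.of(44_954L, 21_094L, 18_944L);
            System.out.println("in ratio (1.2)? " + filesInRatio(run, 1.2)); // true, matching "1 in ratio"
        }
    }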
2024-11-11T07:02:52,675 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/3b8e533f3bc4496a817810e5c29c8c06.1de601fba810c6b237c13985fef558b6->hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/3b8e533f3bc4496a817810e5c29c8c06-top, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-367ecfe6f29c4488abc1cf09f88a0952, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-f4b31e7bf7084deeb648e45b30bdd138, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-960764f342ec4e868e11f7dfe500cf9a] into tmpdir=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp, totalSize=120.8 K 2024-11-11T07:02:52,675 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de. 2024-11-11T07:02:52,675 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de. 2024-11-11T07:02:52,675 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731308571979.b6429c4a74a02857045bfc2e68f9de65. 
2024-11-11T07:02:52,676 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3b8e533f3bc4496a817810e5c29c8c06.1de601fba810c6b237c13985fef558b6, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1731308567782 2024-11-11T07:02:52,676 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7752): Opening region: {ENCODED => b6429c4a74a02857045bfc2e68f9de65, NAME => 'TestLogRolling-testLogRolling,,1731308571979.b6429c4a74a02857045bfc2e68f9de65.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-11T07:02:52,676 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling b6429c4a74a02857045bfc2e68f9de65 2024-11-11T07:02:52,676 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731308571979.b6429c4a74a02857045bfc2e68f9de65.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:02:52,676 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=b88c1b6447f023a04f2710e15f1fe9de, regionState=OPEN, openSeqNum=127, regionLocation=f334e8b0fbdf,37101,1731308556905 2024-11-11T07:02:52,676 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-367ecfe6f29c4488abc1cf09f88a0952, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1731308571894 2024-11-11T07:02:52,676 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7794): checking encryption for b6429c4a74a02857045bfc2e68f9de65 2024-11-11T07:02:52,676 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7797): checking classloading for b6429c4a74a02857045bfc2e68f9de65 2024-11-11T07:02:52,676 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-f4b31e7bf7084deeb648e45b30bdd138, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1731308571918 2024-11-11T07:02:52,677 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-960764f342ec4e868e11f7dfe500cf9a, keycount=2, bloomtype=ROW, size=6.9 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731308571947 2024-11-11T07:02:52,677 INFO [StoreOpener-b6429c4a74a02857045bfc2e68f9de65-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b6429c4a74a02857045bfc2e68f9de65 2024-11-11T07:02:52,678 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37101 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-11T07:02:52,678 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-11T07:02:52,678 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-11T07:02:52,678 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure b88c1b6447f023a04f2710e15f1fe9de, server=f334e8b0fbdf,37101,1731308556905 because future has completed 2024-11-11T07:02:52,678 INFO [StoreOpener-b6429c4a74a02857045bfc2e68f9de65-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b6429c4a74a02857045bfc2e68f9de65 columnFamilyName info 2024-11-11T07:02:52,678 DEBUG [StoreOpener-b6429c4a74a02857045bfc2e68f9de65-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:02:52,688 DEBUG [StoreOpener-b6429c4a74a02857045bfc2e68f9de65-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b6429c4a74a02857045bfc2e68f9de65/info/3b8e533f3bc4496a817810e5c29c8c06.1de601fba810c6b237c13985fef558b6->hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/3b8e533f3bc4496a817810e5c29c8c06-bottom 2024-11-11T07:02:52,689 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=12 2024-11-11T07:02:52,689 INFO [StoreOpener-b6429c4a74a02857045bfc2e68f9de65-1 {}] regionserver.HStore(327): Store=b6429c4a74a02857045bfc2e68f9de65/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T07:02:52,689 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; OpenRegionProcedure b88c1b6447f023a04f2710e15f1fe9de, server=f334e8b0fbdf,37101,1731308556905 in 194 msec 2024-11-11T07:02:52,689 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1038): replaying wal for b6429c4a74a02857045bfc2e68f9de65 2024-11-11T07:02:52,690 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b6429c4a74a02857045bfc2e68f9de65 2024-11-11T07:02:52,691 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b6429c4a74a02857045bfc2e68f9de65 2024-11-11T07:02:52,691 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b88c1b6447f023a04f2710e15f1fe9de, ASSIGN in 360 msec 2024-11-11T07:02:52,691 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1048): stopping wal replay for b6429c4a74a02857045bfc2e68f9de65 2024-11-11T07:02:52,691 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1060): Cleaning up temporary data for b6429c4a74a02857045bfc2e68f9de65 2024-11-11T07:02:52,693 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1093): writing seq id for b6429c4a74a02857045bfc2e68f9de65 2024-11-11T07:02:52,694 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1114): Opened b6429c4a74a02857045bfc2e68f9de65; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=836364, jitterRate=0.06349208950996399}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T07:02:52,694 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b6429c4a74a02857045bfc2e68f9de65 2024-11-11T07:02:52,695 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1006): Region open journal for b6429c4a74a02857045bfc2e68f9de65: Running coprocessor pre-open hook at 1731308572676Writing region info on filesystem at 1731308572676Initializing all the Stores at 1731308572677 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308572677Cleaning up temporary data from old regions at 1731308572691 (+14 ms)Running coprocessor post-open hooks at 1731308572694 (+3 ms)Region opened successfully at 1731308572694 2024-11-11T07:02:52,696 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731308571979.b6429c4a74a02857045bfc2e68f9de65., pid=14, masterSystemTime=1731308572638 2024-11-11T07:02:52,696 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(403): Add compact mark for store b6429c4a74a02857045bfc2e68f9de65:info, priority=-2147483648, current under compaction store size is 2 2024-11-11T07:02:52,696 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T07:02:52,696 DEBUG [RS:0;f334e8b0fbdf:37101-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 
store files, 0 compacting, 1 eligible, 16 blocking 2024-11-11T07:02:52,697 INFO [RS:0;f334e8b0fbdf:37101-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731308571979.b6429c4a74a02857045bfc2e68f9de65. 2024-11-11T07:02:52,697 DEBUG [RS:0;f334e8b0fbdf:37101-longCompactions-0 {}] regionserver.HStore(1541): b6429c4a74a02857045bfc2e68f9de65/info is initiating minor compaction (all files) 2024-11-11T07:02:52,697 INFO [RS:0;f334e8b0fbdf:37101-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b6429c4a74a02857045bfc2e68f9de65/info in TestLogRolling-testLogRolling,,1731308571979.b6429c4a74a02857045bfc2e68f9de65. 2024-11-11T07:02:52,697 INFO [RS:0;f334e8b0fbdf:37101-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b6429c4a74a02857045bfc2e68f9de65/info/3b8e533f3bc4496a817810e5c29c8c06.1de601fba810c6b237c13985fef558b6->hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/3b8e533f3bc4496a817810e5c29c8c06-bottom] into tmpdir=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b6429c4a74a02857045bfc2e68f9de65/.tmp, totalSize=73.6 K 2024-11-11T07:02:52,698 DEBUG [RS:0;f334e8b0fbdf:37101-longCompactions-0 {}] compactions.Compactor(225): Compacting 3b8e533f3bc4496a817810e5c29c8c06.1de601fba810c6b237c13985fef558b6, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731308567782 2024-11-11T07:02:52,699 DEBUG [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731308571979.b6429c4a74a02857045bfc2e68f9de65. 2024-11-11T07:02:52,699 INFO [RS_OPEN_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731308571979.b6429c4a74a02857045bfc2e68f9de65. 
2024-11-11T07:02:52,700 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=b6429c4a74a02857045bfc2e68f9de65, regionState=OPEN, openSeqNum=127, regionLocation=f334e8b0fbdf,37101,1731308556905 2024-11-11T07:02:52,700 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740/.tmp/info/0668c39213ef46ac9e4ae740e09d1842 is 193, key is TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de./info:regioninfo/1731308572676/Put/seqid=0 2024-11-11T07:02:52,702 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=14, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure b6429c4a74a02857045bfc2e68f9de65, server=f334e8b0fbdf,37101,1731308556905 because future has completed 2024-11-11T07:02:52,706 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=11 2024-11-11T07:02:52,706 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure b6429c4a74a02857045bfc2e68f9de65, server=f334e8b0fbdf,37101,1731308556905 in 216 msec 2024-11-11T07:02:52,708 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-11T07:02:52,708 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b6429c4a74a02857045bfc2e68f9de65, ASSIGN in 377 msec 2024-11-11T07:02:52,710 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1de601fba810c6b237c13985fef558b6, daughterA=b6429c4a74a02857045bfc2e68f9de65, daughterB=b88c1b6447f023a04f2710e15f1fe9de in 729 msec 2024-11-11T07:02:52,710 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1de601fba810c6b237c13985fef558b6, daughterA=dfbef0c388c8404d22dd464a8289e896, daughterB=d0c97780b9c0dbf99577cedd098a59e4 2024-11-11T07:02:52,710 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1de601fba810c6b237c13985fef558b6, daughterA=dfbef0c388c8404d22dd464a8289e896, daughterB=d0c97780b9c0dbf99577cedd098a59e4 2024-11-11T07:02:52,710 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1de601fba810c6b237c13985fef558b6, daughterA=dfbef0c388c8404d22dd464a8289e896, daughterB=d0c97780b9c0dbf99577cedd098a59e4 2024-11-11T07:02:52,711 INFO [PEWorker-1 {}] assignment.SplitTableRegionProcedure(534): Split of {ENCODED => 1de601fba810c6b237c13985fef558b6, NAME => 'TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6.', STARTKEY => '', ENDKEY => ''} skipped; state is already SPLIT 2024-11-11T07:02:52,712 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; 
SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1de601fba810c6b237c13985fef558b6, daughterA=dfbef0c388c8404d22dd464a8289e896, daughterB=d0c97780b9c0dbf99577cedd098a59e4 in 724 msec 2024-11-11T07:02:52,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741851_1027 (size=9882) 2024-11-11T07:02:52,719 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b88c1b6447f023a04f2710e15f1fe9de#info#compaction#64 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T07:02:52,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741851_1027 (size=9882) 2024-11-11T07:02:52,720 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/7a1fb57e47e94687bbecb8fd6d0fb01a is 1080, key is row0062/info:/1731308569886/Put/seqid=0 2024-11-11T07:02:52,720 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740/.tmp/info/0668c39213ef46ac9e4ae740e09d1842 2024-11-11T07:02:52,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741852_1028 (size=43081) 2024-11-11T07:02:52,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741852_1028 (size=43081) 2024-11-11T07:02:52,732 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/7a1fb57e47e94687bbecb8fd6d0fb01a as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/7a1fb57e47e94687bbecb8fd6d0fb01a 2024-11-11T07:02:52,734 INFO [RS:0;f334e8b0fbdf:37101-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b6429c4a74a02857045bfc2e68f9de65#info#compaction#65 average throughput is 15.65 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T07:02:52,734 DEBUG [RS:0;f334e8b0fbdf:37101-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b6429c4a74a02857045bfc2e68f9de65/.tmp/info/225a9fb740e642ff9e9f6a7db1a995de is 1080, key is row0001/info:/1731308567782/Put/seqid=0 2024-11-11T07:02:52,738 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in b88c1b6447f023a04f2710e15f1fe9de/info of b88c1b6447f023a04f2710e15f1fe9de into 7a1fb57e47e94687bbecb8fd6d0fb01a(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T07:02:52,738 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b88c1b6447f023a04f2710e15f1fe9de: 2024-11-11T07:02:52,738 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de., storeName=b88c1b6447f023a04f2710e15f1fe9de/info, priority=12, startTime=1731308572673; duration=0sec 2024-11-11T07:02:52,738 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T07:02:52,739 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b88c1b6447f023a04f2710e15f1fe9de:info 2024-11-11T07:02:52,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741853_1029 (size=70862) 2024-11-11T07:02:52,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741853_1029 (size=70862) 2024-11-11T07:02:52,742 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740/.tmp/ns/b77baef7a3f7484ba3645a453347135f is 43, key is default/ns:d/1731308557688/Put/seqid=0 2024-11-11T07:02:52,745 DEBUG [RS:0;f334e8b0fbdf:37101-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b6429c4a74a02857045bfc2e68f9de65/.tmp/info/225a9fb740e642ff9e9f6a7db1a995de as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b6429c4a74a02857045bfc2e68f9de65/info/225a9fb740e642ff9e9f6a7db1a995de 2024-11-11T07:02:52,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741854_1030 (size=5153) 2024-11-11T07:02:52,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741854_1030 (size=5153) 2024-11-11T07:02:52,747 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740/.tmp/ns/b77baef7a3f7484ba3645a453347135f 2024-11-11T07:02:52,752 INFO [RS:0;f334e8b0fbdf:37101-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in b6429c4a74a02857045bfc2e68f9de65/info of b6429c4a74a02857045bfc2e68f9de65 into 225a9fb740e642ff9e9f6a7db1a995de(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T07:02:52,752 DEBUG [RS:0;f334e8b0fbdf:37101-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b6429c4a74a02857045bfc2e68f9de65: 2024-11-11T07:02:52,752 INFO [RS:0;f334e8b0fbdf:37101-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731308571979.b6429c4a74a02857045bfc2e68f9de65., storeName=b6429c4a74a02857045bfc2e68f9de65/info, priority=15, startTime=1731308572696; duration=0sec 2024-11-11T07:02:52,752 DEBUG [RS:0;f334e8b0fbdf:37101-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T07:02:52,752 DEBUG [RS:0;f334e8b0fbdf:37101-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b6429c4a74a02857045bfc2e68f9de65:info 2024-11-11T07:02:52,766 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740/.tmp/table/ab4661604ae542a8ab5d8250a00ddecd is 65, key is TestLogRolling-testLogRolling/table:state/1731308558091/Put/seqid=0 2024-11-11T07:02:52,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741855_1031 (size=5340) 2024-11-11T07:02:52,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741855_1031 (size=5340) 2024-11-11T07:02:52,772 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740/.tmp/table/ab4661604ae542a8ab5d8250a00ddecd 2024-11-11T07:02:52,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740/.tmp/info/0668c39213ef46ac9e4ae740e09d1842 as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740/info/0668c39213ef46ac9e4ae740e09d1842 2024-11-11T07:02:52,782 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740/info/0668c39213ef46ac9e4ae740e09d1842, entries=30, sequenceid=17, filesize=9.7 K 2024-11-11T07:02:52,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740/.tmp/ns/b77baef7a3f7484ba3645a453347135f as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740/ns/b77baef7a3f7484ba3645a453347135f 2024-11-11T07:02:52,787 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740/ns/b77baef7a3f7484ba3645a453347135f, entries=2, sequenceid=17, filesize=5.0 K 2024-11-11T07:02:52,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740/.tmp/table/ab4661604ae542a8ab5d8250a00ddecd as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740/table/ab4661604ae542a8ab5d8250a00ddecd 2024-11-11T07:02:52,792 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740/table/ab4661604ae542a8ab5d8250a00ddecd, entries=2, sequenceid=17, filesize=5.2 K 2024-11-11T07:02:52,793 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 115ms, sequenceid=17, compaction requested=false 2024-11-11T07:02:52,793 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-11T07:02:53,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:02:53,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:53,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:53,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37101 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:60726 deadline: 1731308583950, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6. is not online on f334e8b0fbdf,37101,1731308556905 2024-11-11T07:02:53,980 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6., hostname=f334e8b0fbdf,37101,1731308556905, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6., hostname=f334e8b0fbdf,37101,1731308556905, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6. is not online on f334e8b0fbdf,37101,1731308556905 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-11T07:02:53,981 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6., hostname=f334e8b0fbdf,37101,1731308556905, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6. 
is not online on f334e8b0fbdf,37101,1731308556905 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-11T07:02:53,981 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731308557730.1de601fba810c6b237c13985fef558b6., hostname=f334e8b0fbdf,37101,1731308556905, seqNum=2 from cache 2024-11-11T07:02:54,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:02:54,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:54,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:55,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:02:55,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:55,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:56,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:02:56,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:56,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:57,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:57,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,205 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,205 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,205 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,223 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,223 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,223 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,224 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,224 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,224 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,226 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,227 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,227 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,228 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:57,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:57,734 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T07:02:57,735 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,736 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,736 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,736 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,736 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,736 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,737 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,737 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,760 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,760 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,760 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:57,762 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T07:02:58,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:58,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:58,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:59,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:59,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:02:59,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:00,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:00,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:00,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:01,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:01,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:01,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:02,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:02,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:02,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:03,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:03,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:03,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:04,021 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de., hostname=f334e8b0fbdf,37101,1731308556905, seqNum=127] 2024-11-11T07:03:04,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37101 {}] regionserver.HRegion(8855): Flush requested on b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:03:04,033 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b88c1b6447f023a04f2710e15f1fe9de 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T07:03:04,038 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/81b11231060e433fab3b48aee92a4622 is 1080, key is row0097/info:/1731308584022/Put/seqid=0 2024-11-11T07:03:04,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741856_1032 (size=12516) 2024-11-11T07:03:04,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741856_1032 (size=12516) 2024-11-11T07:03:04,044 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/81b11231060e433fab3b48aee92a4622 2024-11-11T07:03:04,050 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/81b11231060e433fab3b48aee92a4622 as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/81b11231060e433fab3b48aee92a4622 2024-11-11T07:03:04,055 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/81b11231060e433fab3b48aee92a4622, entries=7, sequenceid=137, filesize=12.2 K 2024-11-11T07:03:04,056 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 KB/16140 for b88c1b6447f023a04f2710e15f1fe9de in 24ms, sequenceid=137, compaction requested=false 2024-11-11T07:03:04,056 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b88c1b6447f023a04f2710e15f1fe9de: 2024-11-11T07:03:04,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37101 {}] regionserver.HRegion(8855): Flush requested on b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:03:04,057 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b88c1b6447f023a04f2710e15f1fe9de 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-11T07:03:04,061 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/188de2e434984323827287976f3142c0 is 1080, key is row0104/info:/1731308584033/Put/seqid=0 2024-11-11T07:03:04,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741857_1033 (size=22238) 2024-11-11T07:03:04,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741857_1033 (size=22238) 2024-11-11T07:03:04,066 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/188de2e434984323827287976f3142c0 2024-11-11T07:03:04,072 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/188de2e434984323827287976f3142c0 as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/188de2e434984323827287976f3142c0 2024-11-11T07:03:04,077 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/188de2e434984323827287976f3142c0, entries=16, sequenceid=156, filesize=21.7 K 2024-11-11T07:03:04,078 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=9.46 KB/9684 for b88c1b6447f023a04f2710e15f1fe9de in 21ms, sequenceid=156, compaction requested=true 2024-11-11T07:03:04,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b88c1b6447f023a04f2710e15f1fe9de: 2024-11-11T07:03:04,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b88c1b6447f023a04f2710e15f1fe9de:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T07:03:04,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T07:03:04,078 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T07:03:04,079 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 77835 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T07:03:04,079 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1541): b88c1b6447f023a04f2710e15f1fe9de/info is initiating minor compaction (all files) 2024-11-11T07:03:04,079 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b88c1b6447f023a04f2710e15f1fe9de/info in TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de. 2024-11-11T07:03:04,079 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/7a1fb57e47e94687bbecb8fd6d0fb01a, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/81b11231060e433fab3b48aee92a4622, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/188de2e434984323827287976f3142c0] into tmpdir=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp, totalSize=76.0 K 2024-11-11T07:03:04,080 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7a1fb57e47e94687bbecb8fd6d0fb01a, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731308569886 2024-11-11T07:03:04,080 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting 81b11231060e433fab3b48aee92a4622, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731308584022 2024-11-11T07:03:04,081 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting 188de2e434984323827287976f3142c0, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731308584033 2024-11-11T07:03:04,093 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
b88c1b6447f023a04f2710e15f1fe9de#info#compaction#70 average throughput is 29.76 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T07:03:04,094 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/758cf46f3b2d42f3b983de63c0ca707c is 1080, key is row0062/info:/1731308569886/Put/seqid=0 2024-11-11T07:03:04,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741858_1034 (size=68045) 2024-11-11T07:03:04,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741858_1034 (size=68045) 2024-11-11T07:03:04,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:03:04,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:04,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:04,507 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/758cf46f3b2d42f3b983de63c0ca707c as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/758cf46f3b2d42f3b983de63c0ca707c 2024-11-11T07:03:04,512 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b88c1b6447f023a04f2710e15f1fe9de/info of b88c1b6447f023a04f2710e15f1fe9de into 758cf46f3b2d42f3b983de63c0ca707c(size=66.5 K), total size for store is 66.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T07:03:04,512 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b88c1b6447f023a04f2710e15f1fe9de: 2024-11-11T07:03:04,512 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de., storeName=b88c1b6447f023a04f2710e15f1fe9de/info, priority=13, startTime=1731308584078; duration=0sec 2024-11-11T07:03:04,512 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T07:03:04,512 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b88c1b6447f023a04f2710e15f1fe9de:info 2024-11-11T07:03:05,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:05,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:05,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:03:06,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37101 {}] regionserver.HRegion(8855): Flush requested on b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:03:06,075 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b88c1b6447f023a04f2710e15f1fe9de 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-11T07:03:06,080 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/fac289cbb55d4ae0937d66cbf492b746 is 1080, key is row0120/info:/1731308584058/Put/seqid=0 2024-11-11T07:03:06,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741859_1035 (size=15750) 2024-11-11T07:03:06,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741859_1035 (size=15750) 2024-11-11T07:03:06,085 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/fac289cbb55d4ae0937d66cbf492b746 2024-11-11T07:03:06,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/fac289cbb55d4ae0937d66cbf492b746 as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/fac289cbb55d4ae0937d66cbf492b746 2024-11-11T07:03:06,095 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/fac289cbb55d4ae0937d66cbf492b746, entries=10, sequenceid=170, filesize=15.4 K 2024-11-11T07:03:06,096 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=11.56 KB/11836 for b88c1b6447f023a04f2710e15f1fe9de in 21ms, sequenceid=170, compaction requested=false 2024-11-11T07:03:06,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b88c1b6447f023a04f2710e15f1fe9de: 2024-11-11T07:03:06,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37101 {}] regionserver.HRegion(8855): Flush requested on b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:03:06,098 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b88c1b6447f023a04f2710e15f1fe9de 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-11T07:03:06,101 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/a29532a3c1d74b0a8822c2928ba0d9aa is 1080, key is row0130/info:/1731308586077/Put/seqid=0 2024-11-11T07:03:06,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is 
added to blk_1073741860_1036 (size=19000) 2024-11-11T07:03:06,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741860_1036 (size=19000) 2024-11-11T07:03:06,107 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=186 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/a29532a3c1d74b0a8822c2928ba0d9aa 2024-11-11T07:03:06,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/a29532a3c1d74b0a8822c2928ba0d9aa as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/a29532a3c1d74b0a8822c2928ba0d9aa 2024-11-11T07:03:06,118 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/a29532a3c1d74b0a8822c2928ba0d9aa, entries=13, sequenceid=186, filesize=18.6 K 2024-11-11T07:03:06,118 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=13.66 KB/13988 for b88c1b6447f023a04f2710e15f1fe9de in 21ms, sequenceid=186, compaction requested=true 2024-11-11T07:03:06,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b88c1b6447f023a04f2710e15f1fe9de: 2024-11-11T07:03:06,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b88c1b6447f023a04f2710e15f1fe9de:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T07:03:06,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T07:03:06,119 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T07:03:06,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37101 {}] regionserver.HRegion(8855): Flush requested on b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:03:06,120 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b88c1b6447f023a04f2710e15f1fe9de 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-11T07:03:06,120 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T07:03:06,120 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1541): b88c1b6447f023a04f2710e15f1fe9de/info is initiating minor compaction (all files) 2024-11-11T07:03:06,120 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b88c1b6447f023a04f2710e15f1fe9de/info in TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de. 
2024-11-11T07:03:06,120 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/758cf46f3b2d42f3b983de63c0ca707c, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/fac289cbb55d4ae0937d66cbf492b746, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/a29532a3c1d74b0a8822c2928ba0d9aa] into tmpdir=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp, totalSize=100.4 K 2024-11-11T07:03:06,121 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting 758cf46f3b2d42f3b983de63c0ca707c, keycount=58, bloomtype=ROW, size=66.5 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731308569886 2024-11-11T07:03:06,121 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting fac289cbb55d4ae0937d66cbf492b746, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731308584058 2024-11-11T07:03:06,121 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting a29532a3c1d74b0a8822c2928ba0d9aa, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1731308586077 2024-11-11T07:03:06,124 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/8de7465ffd1842209caefc0e43d8bf3d is 1080, key is row0143/info:/1731308586098/Put/seqid=0 2024-11-11T07:03:06,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741861_1037 (size=20078) 2024-11-11T07:03:06,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741861_1037 (size=20078) 2024-11-11T07:03:06,135 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/8de7465ffd1842209caefc0e43d8bf3d 2024-11-11T07:03:06,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/8de7465ffd1842209caefc0e43d8bf3d as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/8de7465ffd1842209caefc0e43d8bf3d 2024-11-11T07:03:06,142 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b88c1b6447f023a04f2710e15f1fe9de#info#compaction#74 average throughput is 41.56 MB/second, slept 0 time(s) and total slept 
time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T07:03:06,142 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/c73b43f345f3400287324f467a4795d3 is 1080, key is row0062/info:/1731308569886/Put/seqid=0 2024-11-11T07:03:06,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741862_1038 (size=93018) 2024-11-11T07:03:06,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741862_1038 (size=93018) 2024-11-11T07:03:06,146 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/8de7465ffd1842209caefc0e43d8bf3d, entries=14, sequenceid=203, filesize=19.6 K 2024-11-11T07:03:06,147 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=4.20 KB/4304 for b88c1b6447f023a04f2710e15f1fe9de in 28ms, sequenceid=203, compaction requested=false 2024-11-11T07:03:06,147 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b88c1b6447f023a04f2710e15f1fe9de: 2024-11-11T07:03:06,151 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/c73b43f345f3400287324f467a4795d3 as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/c73b43f345f3400287324f467a4795d3 2024-11-11T07:03:06,156 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b88c1b6447f023a04f2710e15f1fe9de/info of b88c1b6447f023a04f2710e15f1fe9de into c73b43f345f3400287324f467a4795d3(size=90.8 K), total size for store is 110.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T07:03:06,156 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b88c1b6447f023a04f2710e15f1fe9de: 2024-11-11T07:03:06,156 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de., storeName=b88c1b6447f023a04f2710e15f1fe9de/info, priority=13, startTime=1731308586118; duration=0sec 2024-11-11T07:03:06,156 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T07:03:06,156 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b88c1b6447f023a04f2710e15f1fe9de:info 2024-11-11T07:03:06,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:03:06,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:06,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:06,841 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-11T07:03:07,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:07,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:07,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:08,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37101 {}] regionserver.HRegion(8855): Flush requested on b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:03:08,132 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b88c1b6447f023a04f2710e15f1fe9de 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T07:03:08,136 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/3aed57127e6542b8a0b040c76940b232 is 1080, key is row0157/info:/1731308586121/Put/seqid=0 2024-11-11T07:03:08,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741863_1039 (size=12516) 2024-11-11T07:03:08,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741863_1039 (size=12516) 2024-11-11T07:03:08,146 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/3aed57127e6542b8a0b040c76940b232 2024-11-11T07:03:08,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/3aed57127e6542b8a0b040c76940b232 as 
hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/3aed57127e6542b8a0b040c76940b232 2024-11-11T07:03:08,157 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/3aed57127e6542b8a0b040c76940b232, entries=7, sequenceid=214, filesize=12.2 K 2024-11-11T07:03:08,158 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 KB/17216 for b88c1b6447f023a04f2710e15f1fe9de in 26ms, sequenceid=214, compaction requested=true 2024-11-11T07:03:08,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b88c1b6447f023a04f2710e15f1fe9de: 2024-11-11T07:03:08,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b88c1b6447f023a04f2710e15f1fe9de:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T07:03:08,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T07:03:08,158 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T07:03:08,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37101 {}] regionserver.HRegion(8855): Flush requested on b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:03:08,159 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b88c1b6447f023a04f2710e15f1fe9de 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-11T07:03:08,159 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 125612 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T07:03:08,159 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1541): b88c1b6447f023a04f2710e15f1fe9de/info is initiating minor compaction (all files) 2024-11-11T07:03:08,159 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b88c1b6447f023a04f2710e15f1fe9de/info in TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de. 
2024-11-11T07:03:08,159 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/c73b43f345f3400287324f467a4795d3, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/8de7465ffd1842209caefc0e43d8bf3d, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/3aed57127e6542b8a0b040c76940b232] into tmpdir=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp, totalSize=122.7 K 2024-11-11T07:03:08,160 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting c73b43f345f3400287324f467a4795d3, keycount=81, bloomtype=ROW, size=90.8 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1731308569886 2024-11-11T07:03:08,160 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8de7465ffd1842209caefc0e43d8bf3d, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1731308586098 2024-11-11T07:03:08,161 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3aed57127e6542b8a0b040c76940b232, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1731308586121 2024-11-11T07:03:08,163 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/0d56d25fc16c4c40964e061edb9f764e is 1080, key is row0164/info:/1731308588133/Put/seqid=0 2024-11-11T07:03:08,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741864_1040 (size=23316) 2024-11-11T07:03:08,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741864_1040 (size=23316) 2024-11-11T07:03:08,169 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/0d56d25fc16c4c40964e061edb9f764e 2024-11-11T07:03:08,172 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b88c1b6447f023a04f2710e15f1fe9de#info#compaction#77 average throughput is 52.33 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T07:03:08,172 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/a97ce4b0b8d54caeb7bd75b4bf158540 is 1080, key is row0062/info:/1731308569886/Put/seqid=0 2024-11-11T07:03:08,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/0d56d25fc16c4c40964e061edb9f764e as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/0d56d25fc16c4c40964e061edb9f764e 2024-11-11T07:03:08,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741865_1041 (size=115762) 2024-11-11T07:03:08,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741865_1041 (size=115762) 2024-11-11T07:03:08,180 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/0d56d25fc16c4c40964e061edb9f764e, entries=17, sequenceid=234, filesize=22.8 K 2024-11-11T07:03:08,181 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=11.56 KB/11836 for b88c1b6447f023a04f2710e15f1fe9de in 22ms, sequenceid=234, compaction requested=false 2024-11-11T07:03:08,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b88c1b6447f023a04f2710e15f1fe9de: 2024-11-11T07:03:08,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37101 {}] regionserver.HRegion(8855): Flush requested on b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:03:08,181 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b88c1b6447f023a04f2710e15f1fe9de 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-11T07:03:08,182 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/a97ce4b0b8d54caeb7bd75b4bf158540 as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/a97ce4b0b8d54caeb7bd75b4bf158540 2024-11-11T07:03:08,185 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/4aa6a9b823914a3fa89d01f9bfaec05e is 1080, key is row0181/info:/1731308588160/Put/seqid=0 2024-11-11T07:03:08,188 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b88c1b6447f023a04f2710e15f1fe9de/info of b88c1b6447f023a04f2710e15f1fe9de into 
a97ce4b0b8d54caeb7bd75b4bf158540(size=113.0 K), total size for store is 135.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T07:03:08,188 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b88c1b6447f023a04f2710e15f1fe9de: 2024-11-11T07:03:08,188 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de., storeName=b88c1b6447f023a04f2710e15f1fe9de/info, priority=13, startTime=1731308588158; duration=0sec 2024-11-11T07:03:08,188 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T07:03:08,188 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b88c1b6447f023a04f2710e15f1fe9de:info 2024-11-11T07:03:08,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741866_1042 (size=17906) 2024-11-11T07:03:08,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741866_1042 (size=17906) 2024-11-11T07:03:08,190 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/4aa6a9b823914a3fa89d01f9bfaec05e 2024-11-11T07:03:08,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/4aa6a9b823914a3fa89d01f9bfaec05e as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/4aa6a9b823914a3fa89d01f9bfaec05e 2024-11-11T07:03:08,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:08,199 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/4aa6a9b823914a3fa89d01f9bfaec05e, entries=12, sequenceid=249, filesize=17.5 K 2024-11-11T07:03:08,200 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=0 B/0 for b88c1b6447f023a04f2710e15f1fe9de in 19ms, sequenceid=249, compaction requested=true 2024-11-11T07:03:08,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b88c1b6447f023a04f2710e15f1fe9de: 2024-11-11T07:03:08,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b88c1b6447f023a04f2710e15f1fe9de:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T07:03:08,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T07:03:08,200 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T07:03:08,201 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 156984 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T07:03:08,201 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1541): b88c1b6447f023a04f2710e15f1fe9de/info is initiating minor compaction (all files) 2024-11-11T07:03:08,201 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b88c1b6447f023a04f2710e15f1fe9de/info in TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de. 
2024-11-11T07:03:08,201 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/a97ce4b0b8d54caeb7bd75b4bf158540, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/0d56d25fc16c4c40964e061edb9f764e, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/4aa6a9b823914a3fa89d01f9bfaec05e] into tmpdir=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp, totalSize=153.3 K 2024-11-11T07:03:08,201 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting a97ce4b0b8d54caeb7bd75b4bf158540, keycount=102, bloomtype=ROW, size=113.0 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1731308569886 2024-11-11T07:03:08,202 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0d56d25fc16c4c40964e061edb9f764e, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1731308588133 2024-11-11T07:03:08,202 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4aa6a9b823914a3fa89d01f9bfaec05e, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1731308588160 2024-11-11T07:03:08,212 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b88c1b6447f023a04f2710e15f1fe9de#info#compaction#79 average throughput is 67.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T07:03:08,213 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/0f67a172801e453cafc948be6d94122d is 1080, key is row0062/info:/1731308569886/Put/seqid=0 2024-11-11T07:03:08,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741867_1043 (size=147331) 2024-11-11T07:03:08,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741867_1043 (size=147331) 2024-11-11T07:03:08,221 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/0f67a172801e453cafc948be6d94122d as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/0f67a172801e453cafc948be6d94122d 2024-11-11T07:03:08,226 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b88c1b6447f023a04f2710e15f1fe9de/info of b88c1b6447f023a04f2710e15f1fe9de into 0f67a172801e453cafc948be6d94122d(size=143.9 K), total size for store is 143.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T07:03:08,227 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b88c1b6447f023a04f2710e15f1fe9de: 2024-11-11T07:03:08,227 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de., storeName=b88c1b6447f023a04f2710e15f1fe9de/info, priority=13, startTime=1731308588200; duration=0sec 2024-11-11T07:03:08,227 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T07:03:08,227 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b88c1b6447f023a04f2710e15f1fe9de:info 2024-11-11T07:03:08,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:08,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:09,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:09,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:09,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:10,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37101 {}] regionserver.HRegion(8855): Flush requested on b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:03:10,192 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b88c1b6447f023a04f2710e15f1fe9de 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T07:03:10,197 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/b7e28fa05e884352983bb3087ad30365 is 1080, key is row0193/info:/1731308590183/Put/seqid=0 2024-11-11T07:03:10,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:03:10,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741868_1044 (size=12521) 2024-11-11T07:03:10,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741868_1044 (size=12521) 2024-11-11T07:03:10,209 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/b7e28fa05e884352983bb3087ad30365 2024-11-11T07:03:10,215 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/b7e28fa05e884352983bb3087ad30365 as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/b7e28fa05e884352983bb3087ad30365 2024-11-11T07:03:10,221 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/b7e28fa05e884352983bb3087ad30365, entries=7, sequenceid=261, filesize=12.2 K 2024-11-11T07:03:10,222 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=18.91 KB/19368 for b88c1b6447f023a04f2710e15f1fe9de in 30ms, sequenceid=261, compaction requested=false 2024-11-11T07:03:10,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b88c1b6447f023a04f2710e15f1fe9de: 2024-11-11T07:03:10,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37101 {}] regionserver.HRegion(8855): Flush requested on b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:03:10,223 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b88c1b6447f023a04f2710e15f1fe9de 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB 2024-11-11T07:03:10,227 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/266e6210b053410ea636046ef3fdf2b6 is 1080, key is row0200/info:/1731308590193/Put/seqid=0 2024-11-11T07:03:10,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741869_1045 (size=25491) 2024-11-11T07:03:10,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741869_1045 (size=25491) 2024-11-11T07:03:10,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:10,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:10,634 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=19.96 KB at sequenceid=283 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/266e6210b053410ea636046ef3fdf2b6 2024-11-11T07:03:10,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/266e6210b053410ea636046ef3fdf2b6 as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/266e6210b053410ea636046ef3fdf2b6 2024-11-11T07:03:10,643 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/266e6210b053410ea636046ef3fdf2b6, entries=19, sequenceid=283, filesize=24.9 K 2024-11-11T07:03:10,645 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~19.96 KB/20444, heapSize ~21.61 KB/22128, currentSize=6.30 KB/6456 for b88c1b6447f023a04f2710e15f1fe9de in 422ms, sequenceid=283, compaction requested=true 2024-11-11T07:03:10,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b88c1b6447f023a04f2710e15f1fe9de: 2024-11-11T07:03:10,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b88c1b6447f023a04f2710e15f1fe9de:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T07:03:10,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T07:03:10,645 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T07:03:10,646 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 185343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T07:03:10,646 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1541): b88c1b6447f023a04f2710e15f1fe9de/info is initiating minor compaction (all files) 2024-11-11T07:03:10,646 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b88c1b6447f023a04f2710e15f1fe9de/info in 
TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de. 2024-11-11T07:03:10,646 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/0f67a172801e453cafc948be6d94122d, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/b7e28fa05e884352983bb3087ad30365, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/266e6210b053410ea636046ef3fdf2b6] into tmpdir=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp, totalSize=181.0 K 2024-11-11T07:03:10,646 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0f67a172801e453cafc948be6d94122d, keycount=131, bloomtype=ROW, size=143.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1731308569886 2024-11-11T07:03:10,647 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting b7e28fa05e884352983bb3087ad30365, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1731308590183 2024-11-11T07:03:10,647 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting 266e6210b053410ea636046ef3fdf2b6, keycount=19, bloomtype=ROW, size=24.9 K, encoding=NONE, compression=NONE, seqNum=283, earliestPutTs=1731308590193 2024-11-11T07:03:10,658 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b88c1b6447f023a04f2710e15f1fe9de#info#compaction#82 average throughput is 53.70 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T07:03:10,658 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/9968071498654564a9d3bbfa990bb1d7 is 1080, key is row0062/info:/1731308569886/Put/seqid=0 2024-11-11T07:03:10,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741870_1046 (size=175493) 2024-11-11T07:03:10,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741870_1046 (size=175493) 2024-11-11T07:03:10,667 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/9968071498654564a9d3bbfa990bb1d7 as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/9968071498654564a9d3bbfa990bb1d7 2024-11-11T07:03:10,672 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b88c1b6447f023a04f2710e15f1fe9de/info of b88c1b6447f023a04f2710e15f1fe9de into 9968071498654564a9d3bbfa990bb1d7(size=171.4 K), total size for store is 171.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T07:03:10,673 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b88c1b6447f023a04f2710e15f1fe9de: 2024-11-11T07:03:10,673 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de., storeName=b88c1b6447f023a04f2710e15f1fe9de/info, priority=13, startTime=1731308590645; duration=0sec 2024-11-11T07:03:10,673 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T07:03:10,673 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b88c1b6447f023a04f2710e15f1fe9de:info 2024-11-11T07:03:11,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:11,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:11,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:12,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:12,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37101 {}] regionserver.HRegion(8855): Flush requested on b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:03:12,236 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b88c1b6447f023a04f2710e15f1fe9de 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T07:03:12,240 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/3b9e0d03052a4dcdb7e8cd0559938f1f is 1080, key is row0219/info:/1731308590224/Put/seqid=0 2024-11-11T07:03:12,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741871_1047 (size=12523) 2024-11-11T07:03:12,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741871_1047 (size=12523) 2024-11-11T07:03:12,246 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/3b9e0d03052a4dcdb7e8cd0559938f1f 2024-11-11T07:03:12,252 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/3b9e0d03052a4dcdb7e8cd0559938f1f as 
hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/3b9e0d03052a4dcdb7e8cd0559938f1f 2024-11-11T07:03:12,257 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/3b9e0d03052a4dcdb7e8cd0559938f1f, entries=7, sequenceid=294, filesize=12.2 K 2024-11-11T07:03:12,258 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for b88c1b6447f023a04f2710e15f1fe9de in 21ms, sequenceid=294, compaction requested=false 2024-11-11T07:03:12,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b88c1b6447f023a04f2710e15f1fe9de: 2024-11-11T07:03:12,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37101 {}] regionserver.HRegion(8855): Flush requested on b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:03:12,259 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b88c1b6447f023a04f2710e15f1fe9de 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-11T07:03:12,263 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/d613a10f6b0a4619b4159bce7a5c4b11 is 1080, key is row0226/info:/1731308592237/Put/seqid=0 2024-11-11T07:03:12,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741872_1048 (size=20092) 2024-11-11T07:03:12,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:12,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741872_1048 (size=20092) 2024-11-11T07:03:12,269 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/d613a10f6b0a4619b4159bce7a5c4b11 2024-11-11T07:03:12,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/d613a10f6b0a4619b4159bce7a5c4b11 as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/d613a10f6b0a4619b4159bce7a5c4b11 2024-11-11T07:03:12,279 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/d613a10f6b0a4619b4159bce7a5c4b11, entries=14, sequenceid=311, filesize=19.6 K 2024-11-11T07:03:12,280 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for b88c1b6447f023a04f2710e15f1fe9de in 21ms, sequenceid=311, compaction requested=true 2024-11-11T07:03:12,280 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b88c1b6447f023a04f2710e15f1fe9de: 2024-11-11T07:03:12,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37101 {}] regionserver.HRegion(8855): Flush requested on b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:03:12,280 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b88c1b6447f023a04f2710e15f1fe9de:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T07:03:12,280 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T07:03:12,280 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T07:03:12,281 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b88c1b6447f023a04f2710e15f1fe9de 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-11T07:03:12,282 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 
3 files of size 208108 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T07:03:12,282 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1541): b88c1b6447f023a04f2710e15f1fe9de/info is initiating minor compaction (all files) 2024-11-11T07:03:12,282 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b88c1b6447f023a04f2710e15f1fe9de/info in TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de. 2024-11-11T07:03:12,282 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/9968071498654564a9d3bbfa990bb1d7, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/3b9e0d03052a4dcdb7e8cd0559938f1f, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/d613a10f6b0a4619b4159bce7a5c4b11] into tmpdir=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp, totalSize=203.2 K 2024-11-11T07:03:12,282 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9968071498654564a9d3bbfa990bb1d7, keycount=157, bloomtype=ROW, size=171.4 K, encoding=NONE, compression=NONE, seqNum=283, earliestPutTs=1731308569886 2024-11-11T07:03:12,283 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3b9e0d03052a4dcdb7e8cd0559938f1f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1731308590224 2024-11-11T07:03:12,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:12,284 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] compactions.Compactor(225): Compacting d613a10f6b0a4619b4159bce7a5c4b11, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1731308592237 2024-11-11T07:03:12,286 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/0501099042264c6298687f33d4676d18 is 1080, key is row0240/info:/1731308592260/Put/seqid=0 2024-11-11T07:03:12,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741873_1049 (size=19013) 2024-11-11T07:03:12,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741873_1049 (size=19013) 2024-11-11T07:03:12,295 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/0501099042264c6298687f33d4676d18 2024-11-11T07:03:12,299 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b88c1b6447f023a04f2710e15f1fe9de#info#compaction#86 average throughput is 60.89 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T07:03:12,300 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/cef3d1bb722947a7a7475ee71ea4424d is 1080, key is row0062/info:/1731308569886/Put/seqid=0 2024-11-11T07:03:12,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/0501099042264c6298687f33d4676d18 as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/0501099042264c6298687f33d4676d18 2024-11-11T07:03:12,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741874_1050 (size=198258) 2024-11-11T07:03:12,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741874_1050 (size=198258) 2024-11-11T07:03:12,306 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/0501099042264c6298687f33d4676d18, entries=13, sequenceid=327, filesize=18.6 K 2024-11-11T07:03:12,307 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=4.20 KB/4304 for b88c1b6447f023a04f2710e15f1fe9de in 27ms, sequenceid=327, compaction requested=false 2024-11-11T07:03:12,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b88c1b6447f023a04f2710e15f1fe9de: 2024-11-11T07:03:12,310 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/cef3d1bb722947a7a7475ee71ea4424d as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/cef3d1bb722947a7a7475ee71ea4424d 2024-11-11T07:03:12,315 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b88c1b6447f023a04f2710e15f1fe9de/info of b88c1b6447f023a04f2710e15f1fe9de into cef3d1bb722947a7a7475ee71ea4424d(size=193.6 K), total size for store is 212.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T07:03:12,315 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b88c1b6447f023a04f2710e15f1fe9de: 2024-11-11T07:03:12,315 INFO [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de., storeName=b88c1b6447f023a04f2710e15f1fe9de/info, priority=13, startTime=1731308592280; duration=0sec 2024-11-11T07:03:12,316 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T07:03:12,316 DEBUG [RS:0;f334e8b0fbdf:37101-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b88c1b6447f023a04f2710e15f1fe9de:info 2024-11-11T07:03:13,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:03:13,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:13,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:14,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:03:14,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:14,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:14,290 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-11T07:03:14,290 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C37101%2C1731308556905.1731308594290 2024-11-11T07:03:14,295 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,295 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,295 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,295 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,295 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,295 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/WALs/f334e8b0fbdf,37101,1731308556905/f334e8b0fbdf%2C37101%2C1731308556905.1731308557305 with entries=312, filesize=308.51 KB; new WAL /user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/WALs/f334e8b0fbdf,37101,1731308556905/f334e8b0fbdf%2C37101%2C1731308556905.1731308594290 2024-11-11T07:03:14,296 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45283:45283),(127.0.0.1/127.0.0.1:41665:41665)] 2024-11-11T07:03:14,296 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/WALs/f334e8b0fbdf,37101,1731308556905/f334e8b0fbdf%2C37101%2C1731308556905.1731308557305 is not closed yet, will try archiving it next time 2024-11-11T07:03:14,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741833_1009 (size=315921) 2024-11-11T07:03:14,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741833_1009 (size=315921) 2024-11-11T07:03:14,300 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing b88c1b6447f023a04f2710e15f1fe9de 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-11T07:03:14,304 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/ae622b10f43f40dca6eb2555a59c45cc is 1080, key is row0253/info:/1731308592281/Put/seqid=0 2024-11-11T07:03:14,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741876_1052 (size=9278) 2024-11-11T07:03:14,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741876_1052 (size=9278) 2024-11-11T07:03:14,310 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/ae622b10f43f40dca6eb2555a59c45cc 2024-11-11T07:03:14,315 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/.tmp/info/ae622b10f43f40dca6eb2555a59c45cc as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/ae622b10f43f40dca6eb2555a59c45cc 2024-11-11T07:03:14,319 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/ae622b10f43f40dca6eb2555a59c45cc, entries=4, sequenceid=335, filesize=9.1 K 2024-11-11T07:03:14,320 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for b88c1b6447f023a04f2710e15f1fe9de in 20ms, sequenceid=335, compaction requested=true 2024-11-11T07:03:14,320 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for b88c1b6447f023a04f2710e15f1fe9de: 2024-11-11T07:03:14,320 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-11-11T07:03:14,323 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740/.tmp/info/53222346bdc847579e672035f8532152 is 186, key is TestLogRolling-testLogRolling,,1731308571979.b6429c4a74a02857045bfc2e68f9de65./info:regioninfo/1731308572700/Put/seqid=0 2024-11-11T07:03:14,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741877_1053 (size=6153) 2024-11-11T07:03:14,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741877_1053 (size=6153) 2024-11-11T07:03:14,328 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740/.tmp/info/53222346bdc847579e672035f8532152 2024-11-11T07:03:14,333 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740/.tmp/info/53222346bdc847579e672035f8532152 as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740/info/53222346bdc847579e672035f8532152 2024-11-11T07:03:14,337 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740/info/53222346bdc847579e672035f8532152, entries=5, sequenceid=21, filesize=6.0 K 2024-11-11T07:03:14,338 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 18ms, sequenceid=21, compaction requested=false 2024-11-11T07:03:14,338 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-11T07:03:14,338 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for b6429c4a74a02857045bfc2e68f9de65: 2024-11-11T07:03:14,339 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C37101%2C1731308556905.1731308594338 2024-11-11T07:03:14,343 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,343 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,343 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,343 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,343 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,343 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/WALs/f334e8b0fbdf,37101,1731308556905/f334e8b0fbdf%2C37101%2C1731308556905.1731308594290 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/WALs/f334e8b0fbdf,37101,1731308556905/f334e8b0fbdf%2C37101%2C1731308556905.1731308594338 2024-11-11T07:03:14,344 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41665:41665),(127.0.0.1/127.0.0.1:45283:45283)] 2024-11-11T07:03:14,344 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/WALs/f334e8b0fbdf,37101,1731308556905/f334e8b0fbdf%2C37101%2C1731308556905.1731308594290 is not closed yet, will try archiving it next time 2024-11-11T07:03:14,344 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/WALs/f334e8b0fbdf,37101,1731308556905/f334e8b0fbdf%2C37101%2C1731308556905.1731308557305 to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/oldWALs/f334e8b0fbdf%2C37101%2C1731308556905.1731308557305 2024-11-11T07:03:14,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741875_1051 (size=731) 2024-11-11T07:03:14,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741875_1051 (size=731) 2024-11-11T07:03:14,345 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T07:03:14,346 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/WALs/f334e8b0fbdf,37101,1731308556905/f334e8b0fbdf%2C37101%2C1731308556905.1731308594290 to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/oldWALs/f334e8b0fbdf%2C37101%2C1731308556905.1731308594290 2024-11-11T07:03:14,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T07:03:14,445 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T07:03:14,446 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at 
java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T07:03:14,446 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:03:14,446 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:03:14,446 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-11T07:03:14,446 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T07:03:14,446 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1071097855, stopped=false 2024-11-11T07:03:14,446 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=f334e8b0fbdf,39247,1731308556856 2024-11-11T07:03:14,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T07:03:14,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37101-0x10105d5fcbb0001, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T07:03:14,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:03:14,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37101-0x10105d5fcbb0001, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:03:14,448 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T07:03:14,448 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-11T07:03:14,448 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T07:03:14,448 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:03:14,449 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'f334e8b0fbdf,37101,1731308556905' ***** 2024-11-11T07:03:14,449 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T07:03:14,449 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37101-0x10105d5fcbb0001, quorum=127.0.0.1:64333, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:03:14,449 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:03:14,449 INFO [RS:0;f334e8b0fbdf:37101 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T07:03:14,449 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T07:03:14,449 INFO [RS:0;f334e8b0fbdf:37101 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T07:03:14,449 INFO [RS:0;f334e8b0fbdf:37101 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T07:03:14,449 INFO [RS:0;f334e8b0fbdf:37101 {}] regionserver.HRegionServer(3091): Received CLOSE for b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:03:14,449 INFO [RS:0;f334e8b0fbdf:37101 {}] regionserver.HRegionServer(3091): Received CLOSE for b6429c4a74a02857045bfc2e68f9de65 2024-11-11T07:03:14,449 INFO [RS:0;f334e8b0fbdf:37101 {}] regionserver.HRegionServer(959): stopping server f334e8b0fbdf,37101,1731308556905 2024-11-11T07:03:14,449 INFO [RS:0;f334e8b0fbdf:37101 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T07:03:14,449 INFO [RS:0;f334e8b0fbdf:37101 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;f334e8b0fbdf:37101. 
2024-11-11T07:03:14,449 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b88c1b6447f023a04f2710e15f1fe9de, disabling compactions & flushes 2024-11-11T07:03:14,449 DEBUG [RS:0;f334e8b0fbdf:37101 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T07:03:14,449 DEBUG [RS:0;f334e8b0fbdf:37101 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:03:14,449 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de. 2024-11-11T07:03:14,449 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de. 2024-11-11T07:03:14,450 INFO [RS:0;f334e8b0fbdf:37101 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T07:03:14,450 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de. after waiting 1 ms 2024-11-11T07:03:14,450 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de. 2024-11-11T07:03:14,450 INFO [RS:0;f334e8b0fbdf:37101 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T07:03:14,450 INFO [RS:0;f334e8b0fbdf:37101 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-11T07:03:14,450 INFO [RS:0;f334e8b0fbdf:37101 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T07:03:14,450 INFO [RS:0;f334e8b0fbdf:37101 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-11T07:03:14,450 DEBUG [RS:0;f334e8b0fbdf:37101 {}] regionserver.HRegionServer(1325): Online Regions={b88c1b6447f023a04f2710e15f1fe9de=TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de., 1588230740=hbase:meta,,1.1588230740, b6429c4a74a02857045bfc2e68f9de65=TestLogRolling-testLogRolling,,1731308571979.b6429c4a74a02857045bfc2e68f9de65.} 2024-11-11T07:03:14,450 DEBUG [RS:0;f334e8b0fbdf:37101 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, b6429c4a74a02857045bfc2e68f9de65, b88c1b6447f023a04f2710e15f1fe9de 2024-11-11T07:03:14,450 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T07:03:14,450 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T07:03:14,450 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T07:03:14,450 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T07:03:14,450 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T07:03:14,450 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/3b8e533f3bc4496a817810e5c29c8c06.1de601fba810c6b237c13985fef558b6->hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/3b8e533f3bc4496a817810e5c29c8c06-top, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-367ecfe6f29c4488abc1cf09f88a0952, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-f4b31e7bf7084deeb648e45b30bdd138, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/7a1fb57e47e94687bbecb8fd6d0fb01a, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-960764f342ec4e868e11f7dfe500cf9a, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/81b11231060e433fab3b48aee92a4622, 
hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/758cf46f3b2d42f3b983de63c0ca707c, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/188de2e434984323827287976f3142c0, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/fac289cbb55d4ae0937d66cbf492b746, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/c73b43f345f3400287324f467a4795d3, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/a29532a3c1d74b0a8822c2928ba0d9aa, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/8de7465ffd1842209caefc0e43d8bf3d, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/a97ce4b0b8d54caeb7bd75b4bf158540, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/3aed57127e6542b8a0b040c76940b232, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/0d56d25fc16c4c40964e061edb9f764e, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/0f67a172801e453cafc948be6d94122d, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/4aa6a9b823914a3fa89d01f9bfaec05e, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/b7e28fa05e884352983bb3087ad30365, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/9968071498654564a9d3bbfa990bb1d7, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/266e6210b053410ea636046ef3fdf2b6, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/3b9e0d03052a4dcdb7e8cd0559938f1f, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/d613a10f6b0a4619b4159bce7a5c4b11] to archive 2024-11-11T07:03:14,451 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-11T07:03:14,453 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/3b8e533f3bc4496a817810e5c29c8c06.1de601fba810c6b237c13985fef558b6 to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/3b8e533f3bc4496a817810e5c29c8c06.1de601fba810c6b237c13985fef558b6 2024-11-11T07:03:14,454 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-11T07:03:14,454 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-367ecfe6f29c4488abc1cf09f88a0952 to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-367ecfe6f29c4488abc1cf09f88a0952 2024-11-11T07:03:14,455 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T07:03:14,455 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T07:03:14,455 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731308594450Running coprocessor pre-close hooks at 1731308594450Disabling compacts and flushes for region at 1731308594450Disabling writes for close at 1731308594450Writing region close event to WAL at 1731308594451 (+1 ms)Running coprocessor post-close hooks at 1731308594455 (+4 ms)Closed at 1731308594455 2024-11-11T07:03:14,455 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T07:03:14,455 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-f4b31e7bf7084deeb648e45b30bdd138 to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-f4b31e7bf7084deeb648e45b30bdd138 2024-11-11T07:03:14,456 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/7a1fb57e47e94687bbecb8fd6d0fb01a to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/7a1fb57e47e94687bbecb8fd6d0fb01a 2024-11-11T07:03:14,457 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-960764f342ec4e868e11f7dfe500cf9a to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/TestLogRolling-testLogRolling=1de601fba810c6b237c13985fef558b6-960764f342ec4e868e11f7dfe500cf9a 2024-11-11T07:03:14,458 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/81b11231060e433fab3b48aee92a4622 to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/81b11231060e433fab3b48aee92a4622 2024-11-11T07:03:14,459 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/758cf46f3b2d42f3b983de63c0ca707c to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/758cf46f3b2d42f3b983de63c0ca707c 2024-11-11T07:03:14,460 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/188de2e434984323827287976f3142c0 to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/188de2e434984323827287976f3142c0 2024-11-11T07:03:14,461 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/fac289cbb55d4ae0937d66cbf492b746 to 
hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/fac289cbb55d4ae0937d66cbf492b746 2024-11-11T07:03:14,462 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/c73b43f345f3400287324f467a4795d3 to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/c73b43f345f3400287324f467a4795d3 2024-11-11T07:03:14,463 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/a29532a3c1d74b0a8822c2928ba0d9aa to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/a29532a3c1d74b0a8822c2928ba0d9aa 2024-11-11T07:03:14,464 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/8de7465ffd1842209caefc0e43d8bf3d to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/8de7465ffd1842209caefc0e43d8bf3d 2024-11-11T07:03:14,465 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/a97ce4b0b8d54caeb7bd75b4bf158540 to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/a97ce4b0b8d54caeb7bd75b4bf158540 2024-11-11T07:03:14,466 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/3aed57127e6542b8a0b040c76940b232 to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/3aed57127e6542b8a0b040c76940b232 2024-11-11T07:03:14,467 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/0d56d25fc16c4c40964e061edb9f764e to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/0d56d25fc16c4c40964e061edb9f764e 2024-11-11T07:03:14,468 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/0f67a172801e453cafc948be6d94122d to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/0f67a172801e453cafc948be6d94122d 2024-11-11T07:03:14,469 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/4aa6a9b823914a3fa89d01f9bfaec05e to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/4aa6a9b823914a3fa89d01f9bfaec05e 2024-11-11T07:03:14,470 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/b7e28fa05e884352983bb3087ad30365 to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/b7e28fa05e884352983bb3087ad30365 2024-11-11T07:03:14,471 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/9968071498654564a9d3bbfa990bb1d7 to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/9968071498654564a9d3bbfa990bb1d7 2024-11-11T07:03:14,472 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/266e6210b053410ea636046ef3fdf2b6 to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/266e6210b053410ea636046ef3fdf2b6 2024-11-11T07:03:14,473 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/3b9e0d03052a4dcdb7e8cd0559938f1f to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/3b9e0d03052a4dcdb7e8cd0559938f1f 2024-11-11T07:03:14,474 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/d613a10f6b0a4619b4159bce7a5c4b11 to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/info/d613a10f6b0a4619b4159bce7a5c4b11 2024-11-11T07:03:14,474 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=f334e8b0fbdf:39247 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-11T07:03:14,474 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [7a1fb57e47e94687bbecb8fd6d0fb01a=43081, 81b11231060e433fab3b48aee92a4622=12516, 758cf46f3b2d42f3b983de63c0ca707c=68045, 188de2e434984323827287976f3142c0=22238, fac289cbb55d4ae0937d66cbf492b746=15750, c73b43f345f3400287324f467a4795d3=93018, a29532a3c1d74b0a8822c2928ba0d9aa=19000, 8de7465ffd1842209caefc0e43d8bf3d=20078, a97ce4b0b8d54caeb7bd75b4bf158540=115762, 3aed57127e6542b8a0b040c76940b232=12516, 0d56d25fc16c4c40964e061edb9f764e=23316, 0f67a172801e453cafc948be6d94122d=147331, 4aa6a9b823914a3fa89d01f9bfaec05e=17906, b7e28fa05e884352983bb3087ad30365=12521, 9968071498654564a9d3bbfa990bb1d7=175493, 266e6210b053410ea636046ef3fdf2b6=25491, 3b9e0d03052a4dcdb7e8cd0559938f1f=12523, d613a10f6b0a4619b4159bce7a5c4b11=20092] 2024-11-11T07:03:14,478 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b88c1b6447f023a04f2710e15f1fe9de/recovered.edits/338.seqid, newMaxSeqId=338, maxSeqId=126 2024-11-11T07:03:14,478 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de. 2024-11-11T07:03:14,478 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b88c1b6447f023a04f2710e15f1fe9de: Waiting for close lock at 1731308594449Running coprocessor pre-close hooks at 1731308594449Disabling compacts and flushes for region at 1731308594449Disabling writes for close at 1731308594450 (+1 ms)Writing region close event to WAL at 1731308594475 (+25 ms)Running coprocessor post-close hooks at 1731308594478 (+3 ms)Closed at 1731308594478 2024-11-11T07:03:14,478 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731308571979.b88c1b6447f023a04f2710e15f1fe9de. 2024-11-11T07:03:14,478 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b6429c4a74a02857045bfc2e68f9de65, disabling compactions & flushes 2024-11-11T07:03:14,478 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731308571979.b6429c4a74a02857045bfc2e68f9de65. 2024-11-11T07:03:14,478 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731308571979.b6429c4a74a02857045bfc2e68f9de65. 2024-11-11T07:03:14,478 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731308571979.b6429c4a74a02857045bfc2e68f9de65. after waiting 0 ms 2024-11-11T07:03:14,478 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731308571979.b6429c4a74a02857045bfc2e68f9de65. 
2024-11-11T07:03:14,479 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731308571979.b6429c4a74a02857045bfc2e68f9de65.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b6429c4a74a02857045bfc2e68f9de65/info/3b8e533f3bc4496a817810e5c29c8c06.1de601fba810c6b237c13985fef558b6->hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/1de601fba810c6b237c13985fef558b6/info/3b8e533f3bc4496a817810e5c29c8c06-bottom] to archive 2024-11-11T07:03:14,479 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731308571979.b6429c4a74a02857045bfc2e68f9de65.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-11T07:03:14,481 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731308571979.b6429c4a74a02857045bfc2e68f9de65.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b6429c4a74a02857045bfc2e68f9de65/info/3b8e533f3bc4496a817810e5c29c8c06.1de601fba810c6b237c13985fef558b6 to hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/archive/data/default/TestLogRolling-testLogRolling/b6429c4a74a02857045bfc2e68f9de65/info/3b8e533f3bc4496a817810e5c29c8c06.1de601fba810c6b237c13985fef558b6 2024-11-11T07:03:14,481 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731308571979.b6429c4a74a02857045bfc2e68f9de65.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-11T07:03:14,484 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/data/default/TestLogRolling-testLogRolling/b6429c4a74a02857045bfc2e68f9de65/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-11-11T07:03:14,484 INFO [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731308571979.b6429c4a74a02857045bfc2e68f9de65. 2024-11-11T07:03:14,484 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b6429c4a74a02857045bfc2e68f9de65: Waiting for close lock at 1731308594478Running coprocessor pre-close hooks at 1731308594478Disabling compacts and flushes for region at 1731308594478Disabling writes for close at 1731308594478Writing region close event to WAL at 1731308594481 (+3 ms)Running coprocessor post-close hooks at 1731308594484 (+3 ms)Closed at 1731308594484 2024-11-11T07:03:14,484 DEBUG [RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731308571979.b6429c4a74a02857045bfc2e68f9de65. 2024-11-11T07:03:14,650 INFO [RS:0;f334e8b0fbdf:37101 {}] regionserver.HRegionServer(976): stopping server f334e8b0fbdf,37101,1731308556905; all regions closed. 
2024-11-11T07:03:14,651 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,651 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,651 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,651 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,651 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741834_1010 (size=8107) 2024-11-11T07:03:14,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741834_1010 (size=8107) 2024-11-11T07:03:14,655 DEBUG [RS:0;f334e8b0fbdf:37101 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/oldWALs 2024-11-11T07:03:14,655 INFO [RS:0;f334e8b0fbdf:37101 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f334e8b0fbdf%2C37101%2C1731308556905.meta:.meta(num 1731308557652) 2024-11-11T07:03:14,655 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,656 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,656 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,656 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,656 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741878_1054 (size=780) 2024-11-11T07:03:14,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741878_1054 (size=780) 2024-11-11T07:03:14,659 DEBUG [RS:0;f334e8b0fbdf:37101 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/oldWALs 2024-11-11T07:03:14,659 INFO [RS:0;f334e8b0fbdf:37101 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f334e8b0fbdf%2C37101%2C1731308556905:(num 1731308594338) 2024-11-11T07:03:14,659 DEBUG [RS:0;f334e8b0fbdf:37101 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:03:14,659 INFO [RS:0;f334e8b0fbdf:37101 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T07:03:14,660 INFO [RS:0;f334e8b0fbdf:37101 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T07:03:14,660 INFO [RS:0;f334e8b0fbdf:37101 {}] hbase.ChoreService(370): Chore service for: regionserver/f334e8b0fbdf:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T07:03:14,660 INFO [RS:0;f334e8b0fbdf:37101 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T07:03:14,660 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T07:03:14,660 INFO [RS:0;f334e8b0fbdf:37101 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37101 2024-11-11T07:03:14,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T07:03:14,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37101-0x10105d5fcbb0001, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f334e8b0fbdf,37101,1731308556905 2024-11-11T07:03:14,662 INFO [RS:0;f334e8b0fbdf:37101 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T07:03:14,663 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f334e8b0fbdf,37101,1731308556905] 2024-11-11T07:03:14,666 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f334e8b0fbdf,37101,1731308556905 already deleted, retry=false 2024-11-11T07:03:14,666 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f334e8b0fbdf,37101,1731308556905 expired; onlineServers=0 2024-11-11T07:03:14,666 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'f334e8b0fbdf,39247,1731308556856' ***** 2024-11-11T07:03:14,666 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T07:03:14,667 INFO [M:0;f334e8b0fbdf:39247 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T07:03:14,667 INFO [M:0;f334e8b0fbdf:39247 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T07:03:14,667 DEBUG [M:0;f334e8b0fbdf:39247 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T07:03:14,667 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-11T07:03:14,667 DEBUG [M:0;f334e8b0fbdf:39247 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T07:03:14,667 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.small.0-1731308557057 {}] cleaner.HFileCleaner(306): Exit Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.small.0-1731308557057,5,FailOnTimeoutGroup] 2024-11-11T07:03:14,667 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.large.0-1731308557057 {}] cleaner.HFileCleaner(306): Exit Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.large.0-1731308557057,5,FailOnTimeoutGroup] 2024-11-11T07:03:14,667 INFO [M:0;f334e8b0fbdf:39247 {}] hbase.ChoreService(370): Chore service for: master/f334e8b0fbdf:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T07:03:14,667 INFO [M:0;f334e8b0fbdf:39247 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T07:03:14,667 DEBUG [M:0;f334e8b0fbdf:39247 {}] master.HMaster(1795): Stopping service threads 2024-11-11T07:03:14,667 INFO [M:0;f334e8b0fbdf:39247 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T07:03:14,667 INFO [M:0;f334e8b0fbdf:39247 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T07:03:14,668 INFO [M:0;f334e8b0fbdf:39247 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T07:03:14,668 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T07:03:14,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T07:03:14,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:03:14,668 DEBUG [M:0;f334e8b0fbdf:39247 {}] zookeeper.ZKUtil(347): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T07:03:14,668 WARN [M:0;f334e8b0fbdf:39247 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T07:03:14,669 INFO [M:0;f334e8b0fbdf:39247 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/.lastflushedseqids 2024-11-11T07:03:14,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741879_1055 (size=228) 2024-11-11T07:03:14,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741879_1055 (size=228) 2024-11-11T07:03:14,674 INFO [M:0;f334e8b0fbdf:39247 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T07:03:14,675 INFO [M:0;f334e8b0fbdf:39247 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T07:03:14,675 DEBUG [M:0;f334e8b0fbdf:39247 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T07:03:14,675 INFO [M:0;f334e8b0fbdf:39247 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:03:14,675 DEBUG [M:0;f334e8b0fbdf:39247 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:03:14,675 DEBUG [M:0;f334e8b0fbdf:39247 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T07:03:14,675 DEBUG [M:0;f334e8b0fbdf:39247 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:03:14,675 INFO [M:0;f334e8b0fbdf:39247 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=53.71 KB heapSize=65.93 KB 2024-11-11T07:03:14,690 DEBUG [M:0;f334e8b0fbdf:39247 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2b3ab9d9a14247b7a6e0f046abfd7d53 is 82, key is hbase:meta,,1/info:regioninfo/1731308557673/Put/seqid=0 2024-11-11T07:03:14,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741880_1056 (size=5672) 2024-11-11T07:03:14,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741880_1056 (size=5672) 2024-11-11T07:03:14,695 INFO [M:0;f334e8b0fbdf:39247 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2b3ab9d9a14247b7a6e0f046abfd7d53 2024-11-11T07:03:14,713 DEBUG [M:0;f334e8b0fbdf:39247 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5d84a4db4328456a9f7386a501c851d2 is 751, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731308558095/Put/seqid=0 2024-11-11T07:03:14,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741881_1057 (size=7681) 2024-11-11T07:03:14,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741881_1057 (size=7681) 2024-11-11T07:03:14,718 INFO [M:0;f334e8b0fbdf:39247 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.11 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5d84a4db4328456a9f7386a501c851d2 2024-11-11T07:03:14,723 INFO [M:0;f334e8b0fbdf:39247 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5d84a4db4328456a9f7386a501c851d2 2024-11-11T07:03:14,737 DEBUG [M:0;f334e8b0fbdf:39247 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b82e3e357e0e48928fe565edf559a1a0 is 69, key is f334e8b0fbdf,37101,1731308556905/rs:state/1731308557138/Put/seqid=0 2024-11-11T07:03:14,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741882_1058 (size=5156) 2024-11-11T07:03:14,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741882_1058 (size=5156) 2024-11-11T07:03:14,742 INFO [M:0;f334e8b0fbdf:39247 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b82e3e357e0e48928fe565edf559a1a0 2024-11-11T07:03:14,759 DEBUG [M:0;f334e8b0fbdf:39247 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ddf9b23a71c644b59a3ed748ce917679 is 52, key is load_balancer_on/state:d/1731308557727/Put/seqid=0 2024-11-11T07:03:14,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741883_1059 (size=5056) 2024-11-11T07:03:14,764 INFO [RS:0;f334e8b0fbdf:37101 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T07:03:14,764 INFO [RS:0;f334e8b0fbdf:37101 {}] regionserver.HRegionServer(1031): Exiting; stopping=f334e8b0fbdf,37101,1731308556905; zookeeper connection closed. 
2024-11-11T07:03:14,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741883_1059 (size=5056) 2024-11-11T07:03:14,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37101-0x10105d5fcbb0001, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:03:14,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37101-0x10105d5fcbb0001, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:03:14,764 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@d50bb76 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@d50bb76 2024-11-11T07:03:14,764 INFO [M:0;f334e8b0fbdf:39247 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ddf9b23a71c644b59a3ed748ce917679 2024-11-11T07:03:14,764 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-11T07:03:14,768 DEBUG [M:0;f334e8b0fbdf:39247 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2b3ab9d9a14247b7a6e0f046abfd7d53 as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2b3ab9d9a14247b7a6e0f046abfd7d53 2024-11-11T07:03:14,772 INFO [M:0;f334e8b0fbdf:39247 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2b3ab9d9a14247b7a6e0f046abfd7d53, entries=8, sequenceid=129, filesize=5.5 K 2024-11-11T07:03:14,773 DEBUG [M:0;f334e8b0fbdf:39247 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5d84a4db4328456a9f7386a501c851d2 as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5d84a4db4328456a9f7386a501c851d2 2024-11-11T07:03:14,776 INFO [M:0;f334e8b0fbdf:39247 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5d84a4db4328456a9f7386a501c851d2 2024-11-11T07:03:14,776 INFO [M:0;f334e8b0fbdf:39247 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5d84a4db4328456a9f7386a501c851d2, entries=14, sequenceid=129, filesize=7.5 K 2024-11-11T07:03:14,777 DEBUG [M:0;f334e8b0fbdf:39247 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b82e3e357e0e48928fe565edf559a1a0 as 
hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b82e3e357e0e48928fe565edf559a1a0 2024-11-11T07:03:14,781 INFO [M:0;f334e8b0fbdf:39247 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b82e3e357e0e48928fe565edf559a1a0, entries=1, sequenceid=129, filesize=5.0 K 2024-11-11T07:03:14,781 DEBUG [M:0;f334e8b0fbdf:39247 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ddf9b23a71c644b59a3ed748ce917679 as hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ddf9b23a71c644b59a3ed748ce917679 2024-11-11T07:03:14,785 INFO [M:0;f334e8b0fbdf:39247 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36089/user/jenkins/test-data/6b013c5a-be92-222e-e485-9d4d211d0925/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ddf9b23a71c644b59a3ed748ce917679, entries=1, sequenceid=129, filesize=4.9 K 2024-11-11T07:03:14,786 INFO [M:0;f334e8b0fbdf:39247 {}] regionserver.HRegion(3140): Finished flush of dataSize ~53.71 KB/54997, heapSize ~65.87 KB/67448, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=129, compaction requested=false 2024-11-11T07:03:14,787 INFO [M:0;f334e8b0fbdf:39247 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:03:14,787 DEBUG [M:0;f334e8b0fbdf:39247 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731308594675Disabling compacts and flushes for region at 1731308594675Disabling writes for close at 1731308594675Obtaining lock to block concurrent updates at 1731308594675Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731308594675Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=54997, getHeapSize=67448, getOffHeapSize=0, getCellsCount=152 at 1731308594675Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731308594676 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731308594676Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731308594690 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731308594690Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731308594699 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731308594713 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731308594713Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731308594723 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731308594737 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731308594737Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731308594746 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731308594759 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731308594759Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7df73e36: reopening flushed file at 1731308594768 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@720f5e: reopening flushed file at 1731308594772 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14514ed5: reopening flushed file at 1731308594777 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@226fd342: reopening flushed file at 1731308594781 (+4 ms)Finished flush of dataSize ~53.71 KB/54997, heapSize ~65.87 KB/67448, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=129, compaction requested=false at 1731308594786 (+5 ms)Writing region close event to WAL at 1731308594787 (+1 ms)Closed at 1731308594787 2024-11-11T07:03:14,788 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,788 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,788 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,788 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,788 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:14,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43571 is added to blk_1073741830_1006 (size=63927) 2024-11-11T07:03:14,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34399 is added to blk_1073741830_1006 (size=63927) 2024-11-11T07:03:14,790 INFO [M:0;f334e8b0fbdf:39247 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-11T07:03:14,790 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T07:03:14,790 INFO [M:0;f334e8b0fbdf:39247 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39247 2024-11-11T07:03:14,791 INFO [M:0;f334e8b0fbdf:39247 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T07:03:14,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:03:14,892 INFO [M:0;f334e8b0fbdf:39247 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T07:03:14,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39247-0x10105d5fcbb0000, quorum=127.0.0.1:64333, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:03:14,895 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4328dba6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:03:14,895 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@250c37c4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:03:14,895 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:03:14,895 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@444d0b71{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:03:14,895 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ac7d52f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/hadoop.log.dir/,STOPPED} 2024-11-11T07:03:14,897 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T07:03:14,897 WARN [BP-641971557-172.17.0.2-1731308556201 heartbeating to localhost/127.0.0.1:36089 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T07:03:14,897 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T07:03:14,897 WARN [BP-641971557-172.17.0.2-1731308556201 heartbeating to localhost/127.0.0.1:36089 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-641971557-172.17.0.2-1731308556201 (Datanode Uuid dc8384a2-8ced-4f53-9483-25aa0a6dc1e5) service to localhost/127.0.0.1:36089 2024-11-11T07:03:14,898 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/cluster_869babdb-b34a-b481-77d5-e4b91c1cf8f4/data/data3/current/BP-641971557-172.17.0.2-1731308556201 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:03:14,898 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/cluster_869babdb-b34a-b481-77d5-e4b91c1cf8f4/data/data4/current/BP-641971557-172.17.0.2-1731308556201 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:03:14,898 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T07:03:14,900 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@419b7afd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:03:14,900 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a8558d3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:03:14,900 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:03:14,900 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a83b9f6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:03:14,900 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@256ffd79{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/hadoop.log.dir/,STOPPED} 2024-11-11T07:03:14,902 WARN [BP-641971557-172.17.0.2-1731308556201 heartbeating to localhost/127.0.0.1:36089 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T07:03:14,902 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T07:03:14,902 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T07:03:14,902 WARN [BP-641971557-172.17.0.2-1731308556201 heartbeating to localhost/127.0.0.1:36089 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-641971557-172.17.0.2-1731308556201 (Datanode Uuid b63cddaf-1c61-4532-8c09-2e62c591ed6a) service to localhost/127.0.0.1:36089 2024-11-11T07:03:14,902 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/cluster_869babdb-b34a-b481-77d5-e4b91c1cf8f4/data/data1/current/BP-641971557-172.17.0.2-1731308556201 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:03:14,903 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/cluster_869babdb-b34a-b481-77d5-e4b91c1cf8f4/data/data2/current/BP-641971557-172.17.0.2-1731308556201 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:03:14,903 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T07:03:14,908 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@47a053ae{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T07:03:14,909 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@446d84f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:03:14,909 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:03:14,909 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d320e1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:03:14,909 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@53fc2fb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/hadoop.log.dir/,STOPPED} 2024-11-11T07:03:14,916 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T07:03:14,942 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T07:03:14,951 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=231 (was 207) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36089 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36089 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36089 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:36089 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36089 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36089 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36089 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36089 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=516 (was 485) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=25 (was 40), ProcessCount=11 (was 11), AvailableMemoryMB=8196 (was 8284) 2024-11-11T07:03:14,959 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=231, OpenFileDescriptor=516, MaxFileDescriptor=1048576, SystemLoadAverage=25, ProcessCount=11, AvailableMemoryMB=8196 2024-11-11T07:03:14,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T07:03:14,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/hadoop.log.dir so I do NOT create it in target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000 2024-11-11T07:03:14,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/74844443-5625-3ede-31ae-cf5061b49fc5/hadoop.tmp.dir so I do NOT create it in target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000 2024-11-11T07:03:14,959 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/cluster_f813a044-6a12-a3ec-2096-d555328d5c86, deleteOnExit=true 2024-11-11T07:03:14,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T07:03:14,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/test.cache.data in system properties and HBase conf 2024-11-11T07:03:14,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T07:03:14,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/hadoop.log.dir in system properties and HBase conf 2024-11-11T07:03:14,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T07:03:14,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T07:03:14,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T07:03:14,960 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-11T07:03:14,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T07:03:14,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T07:03:14,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T07:03:14,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T07:03:14,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T07:03:14,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T07:03:14,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T07:03:14,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T07:03:14,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T07:03:14,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/nfs.dump.dir in system properties and HBase conf 2024-11-11T07:03:14,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/java.io.tmpdir in system properties and HBase conf 2024-11-11T07:03:14,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T07:03:14,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T07:03:14,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T07:03:14,973 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T07:03:15,031 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:03:15,034 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T07:03:15,040 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T07:03:15,040 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T07:03:15,041 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T07:03:15,041 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:03:15,042 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3317cc02{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/hadoop.log.dir/,AVAILABLE} 2024-11-11T07:03:15,042 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a248f23{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T07:03:15,152 INFO [regionserver/f334e8b0fbdf:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T07:03:15,157 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7c411d10{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/java.io.tmpdir/jetty-localhost-42303-hadoop-hdfs-3_4_1-tests_jar-_-any-14980746295924057026/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T07:03:15,157 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a06f288{HTTP/1.1, (http/1.1)}{localhost:42303} 2024-11-11T07:03:15,157 INFO [Time-limited test {}] server.Server(415): Started @278736ms 2024-11-11T07:03:15,170 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T07:03:15,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:15,224 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:03:15,226 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T07:03:15,227 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T07:03:15,227 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T07:03:15,227 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T07:03:15,227 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a21ffab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/hadoop.log.dir/,AVAILABLE} 2024-11-11T07:03:15,227 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@665d17bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T07:03:15,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:15,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:03:15,340 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@a6bcfcb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/java.io.tmpdir/jetty-localhost-40675-hadoop-hdfs-3_4_1-tests_jar-_-any-7141995308622908511/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:03:15,341 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@60c41d21{HTTP/1.1, (http/1.1)}{localhost:40675} 2024-11-11T07:03:15,341 INFO [Time-limited test {}] server.Server(415): Started @278920ms 2024-11-11T07:03:15,342 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T07:03:15,369 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T07:03:15,372 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T07:03:15,372 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T07:03:15,372 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T07:03:15,372 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T07:03:15,373 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d7e0513{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/hadoop.log.dir/,AVAILABLE} 2024-11-11T07:03:15,373 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2191d18b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T07:03:15,439 WARN [Thread-2465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/cluster_f813a044-6a12-a3ec-2096-d555328d5c86/data/data1/current/BP-595726902-172.17.0.2-1731308594980/current, will proceed with Du for space computation calculation, 2024-11-11T07:03:15,439 WARN [Thread-2466 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/cluster_f813a044-6a12-a3ec-2096-d555328d5c86/data/data2/current/BP-595726902-172.17.0.2-1731308594980/current, will proceed with Du for space computation calculation, 2024-11-11T07:03:15,479 WARN [Thread-2444 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T07:03:15,482 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x65ab4c5412039f90 with lease ID 0xfa95d4afd464a6ea: Processing first storage report for DS-983391f5-4675-4f7c-ad2a-c29444b4bed6 from datanode DatanodeRegistration(127.0.0.1:40533, datanodeUuid=461b5cf0-c9a8-4738-a28e-2498344ac4aa, infoPort=39307, infoSecurePort=0, ipcPort=43099, storageInfo=lv=-57;cid=testClusterID;nsid=1406795274;c=1731308594980) 2024-11-11T07:03:15,482 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x65ab4c5412039f90 with lease ID 0xfa95d4afd464a6ea: from storage DS-983391f5-4675-4f7c-ad2a-c29444b4bed6 node DatanodeRegistration(127.0.0.1:40533, datanodeUuid=461b5cf0-c9a8-4738-a28e-2498344ac4aa, infoPort=39307, infoSecurePort=0, ipcPort=43099, storageInfo=lv=-57;cid=testClusterID;nsid=1406795274;c=1731308594980), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:03:15,482 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x65ab4c5412039f90 with lease ID 0xfa95d4afd464a6ea: Processing first storage report for DS-2e160d73-c73e-49ba-b5ab-092a6dfa0937 from datanode DatanodeRegistration(127.0.0.1:40533, datanodeUuid=461b5cf0-c9a8-4738-a28e-2498344ac4aa, infoPort=39307, infoSecurePort=0, ipcPort=43099, storageInfo=lv=-57;cid=testClusterID;nsid=1406795274;c=1731308594980) 2024-11-11T07:03:15,482 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x65ab4c5412039f90 with lease ID 0xfa95d4afd464a6ea: from storage DS-2e160d73-c73e-49ba-b5ab-092a6dfa0937 node DatanodeRegistration(127.0.0.1:40533, datanodeUuid=461b5cf0-c9a8-4738-a28e-2498344ac4aa, infoPort=39307, infoSecurePort=0, ipcPort=43099, storageInfo=lv=-57;cid=testClusterID;nsid=1406795274;c=1731308594980), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:03:15,507 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2d0ad989{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/java.io.tmpdir/jetty-localhost-45337-hadoop-hdfs-3_4_1-tests_jar-_-any-15201351147367349079/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:03:15,507 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7e58a9be{HTTP/1.1, (http/1.1)}{localhost:45337} 2024-11-11T07:03:15,508 INFO [Time-limited test {}] server.Server(415): Started @279087ms 2024-11-11T07:03:15,509 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
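[editor's note] The "Starting up minicluster with option: StartMiniClusterOption{numMasters=1, ..., numDataNodes=2, ...}" record earlier, the DFS and datanode bring-up just logged, and the ZooKeeper/master/regionserver startup that follows are all driven by one call in the test harness. A minimal sketch of that call, assuming the HBaseTestingUtil / StartMiniClusterOption API named in these records (the builder method names are inferred from the logged option string, so treat them as assumptions):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterStartupSketch {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtil util = new HBaseTestingUtil();
            // Mirror the option string logged above: 1 master, 1 regionserver,
            // 2 datanodes, 1 ZooKeeper server.
            StartMiniClusterOption option = StartMiniClusterOption.builder()
                .numMasters(1)
                .numRegionServers(1)
                .numDataNodes(2)
                .numZkServers(1)
                .build();
            util.startMiniCluster(option);   // starts DFS, ZK, HMaster and the regionserver
            try {
                // test body would run against util.getConnection() here
            } finally {
                util.shutdownMiniCluster();  // produces the "Minicluster is down" record
            }
        }
    }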
2024-11-11T07:03:15,604 WARN [Thread-2491 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/cluster_f813a044-6a12-a3ec-2096-d555328d5c86/data/data3/current/BP-595726902-172.17.0.2-1731308594980/current, will proceed with Du for space computation calculation, 2024-11-11T07:03:15,604 WARN [Thread-2492 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/cluster_f813a044-6a12-a3ec-2096-d555328d5c86/data/data4/current/BP-595726902-172.17.0.2-1731308594980/current, will proceed with Du for space computation calculation, 2024-11-11T07:03:15,627 WARN [Thread-2480 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T07:03:15,629 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3f820fbe790b76fd with lease ID 0xfa95d4afd464a6eb: Processing first storage report for DS-7e7b5f45-8b0f-41f2-9fea-d92f35bbe7e5 from datanode DatanodeRegistration(127.0.0.1:36333, datanodeUuid=0865c4b1-87f9-440d-8c18-ff87f23d0042, infoPort=35969, infoSecurePort=0, ipcPort=37081, storageInfo=lv=-57;cid=testClusterID;nsid=1406795274;c=1731308594980) 2024-11-11T07:03:15,629 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3f820fbe790b76fd with lease ID 0xfa95d4afd464a6eb: from storage DS-7e7b5f45-8b0f-41f2-9fea-d92f35bbe7e5 node DatanodeRegistration(127.0.0.1:36333, datanodeUuid=0865c4b1-87f9-440d-8c18-ff87f23d0042, infoPort=35969, infoSecurePort=0, ipcPort=37081, storageInfo=lv=-57;cid=testClusterID;nsid=1406795274;c=1731308594980), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:03:15,629 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3f820fbe790b76fd with lease ID 0xfa95d4afd464a6eb: Processing first storage report for DS-c5ee924f-e7c6-4f35-954d-dabaf52b7369 from datanode DatanodeRegistration(127.0.0.1:36333, datanodeUuid=0865c4b1-87f9-440d-8c18-ff87f23d0042, infoPort=35969, infoSecurePort=0, ipcPort=37081, storageInfo=lv=-57;cid=testClusterID;nsid=1406795274;c=1731308594980) 2024-11-11T07:03:15,629 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3f820fbe790b76fd with lease ID 0xfa95d4afd464a6eb: from storage DS-c5ee924f-e7c6-4f35-954d-dabaf52b7369 node DatanodeRegistration(127.0.0.1:36333, datanodeUuid=0865c4b1-87f9-440d-8c18-ff87f23d0042, infoPort=35969, infoSecurePort=0, ipcPort=37081, storageInfo=lv=-57;cid=testClusterID;nsid=1406795274;c=1731308594980), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T07:03:15,630 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000 2024-11-11T07:03:15,632 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/cluster_f813a044-6a12-a3ec-2096-d555328d5c86/zookeeper_0, clientPort=60298, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/cluster_f813a044-6a12-a3ec-2096-d555328d5c86/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/cluster_f813a044-6a12-a3ec-2096-d555328d5c86/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T07:03:15,633 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60298 2024-11-11T07:03:15,633 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:03:15,635 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:03:15,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40533 is added to blk_1073741825_1001 (size=7) 2024-11-11T07:03:15,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36333 is added to blk_1073741825_1001 (size=7) 2024-11-11T07:03:15,643 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46 with version=8 2024-11-11T07:03:15,643 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45731/user/jenkins/test-data/fa8d8ad9-f73b-2e6f-fdba-9e7ecbd8cfe5/hbase-staging 2024-11-11T07:03:15,645 INFO [Time-limited test {}] client.ConnectionUtils(128): master/f334e8b0fbdf:0 server-side Connection retries=45 2024-11-11T07:03:15,645 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:03:15,645 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T07:03:15,645 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T07:03:15,645 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:03:15,645 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T07:03:15,645 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T07:03:15,646 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T07:03:15,646 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45229 2024-11-11T07:03:15,647 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45229 connecting to ZooKeeper ensemble=127.0.0.1:60298 2024-11-11T07:03:15,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:452290x0, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T07:03:15,654 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45229-0x10105d694400000 connected 2024-11-11T07:03:15,671 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:03:15,673 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:03:15,675 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:03:15,675 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46, hbase.cluster.distributed=false 2024-11-11T07:03:15,676 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T07:03:15,679 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45229 2024-11-11T07:03:15,681 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45229 2024-11-11T07:03:15,681 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45229 2024-11-11T07:03:15,682 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45229 2024-11-11T07:03:15,683 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45229 2024-11-11T07:03:15,697 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f334e8b0fbdf:0 server-side Connection retries=45 2024-11-11T07:03:15,697 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:03:15,697 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T07:03:15,697 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T07:03:15,697 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T07:03:15,697 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T07:03:15,697 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T07:03:15,698 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T07:03:15,698 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33409 2024-11-11T07:03:15,699 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33409 connecting to ZooKeeper ensemble=127.0.0.1:60298 2024-11-11T07:03:15,699 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:03:15,701 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:03:15,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:334090x0, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T07:03:15,705 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:334090x0, quorum=127.0.0.1:60298, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:03:15,705 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33409-0x10105d694400001 connected 2024-11-11T07:03:15,705 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T07:03:15,706 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T07:03:15,706 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33409-0x10105d694400001, quorum=127.0.0.1:60298, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T07:03:15,707 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33409-0x10105d694400001, quorum=127.0.0.1:60298, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T07:03:15,707 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33409 2024-11-11T07:03:15,707 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33409 2024-11-11T07:03:15,707 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33409 2024-11-11T07:03:15,708 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33409 2024-11-11T07:03:15,708 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33409 2024-11-11T07:03:15,719 
DEBUG [M:0;f334e8b0fbdf:45229 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;f334e8b0fbdf:45229 2024-11-11T07:03:15,720 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/f334e8b0fbdf,45229,1731308595645 2024-11-11T07:03:15,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T07:03:15,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33409-0x10105d694400001, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T07:03:15,722 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/f334e8b0fbdf,45229,1731308595645 2024-11-11T07:03:15,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33409-0x10105d694400001, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T07:03:15,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33409-0x10105d694400001, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:03:15,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:03:15,724 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T07:03:15,724 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/f334e8b0fbdf,45229,1731308595645 from backup master directory 2024-11-11T07:03:15,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/f334e8b0fbdf,45229,1731308595645 2024-11-11T07:03:15,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33409-0x10105d694400001, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T07:03:15,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T07:03:15,728 WARN [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
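[editor's note] The ZKUtil(113) records above ("Set watcher on znode that does not yet exist, /hbase/running", "/hbase/master", "/hbase/acl") rely on the fact that ZooKeeper's exists() call registers a watch even when the node is absent; that is how the master and regionserver learn when those znodes are later created. A small illustration with the plain ZooKeeper client; the connect string is the one logged for this run, everything else is illustrative:

    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZnodeWatchSketch {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:60298", 30000, event -> {});
            // exists() returns null when the znode is missing but still leaves a
            // one-shot watch behind, so the later creation of /hbase/running
            // fires a NodeCreated event on this client.
            Stat stat = zk.exists("/hbase/running", event -> {
                if (event.getType() == Watcher.Event.EventType.NodeCreated) {
                    System.out.println("/hbase/running created: cluster is up");
                }
            });
            System.out.println("znode currently exists: " + (stat != null));
            zk.close();
        }
    }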
2024-11-11T07:03:15,728 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=f334e8b0fbdf,45229,1731308595645 2024-11-11T07:03:15,731 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/hbase.id] with ID: 9d807fc8-4382-4ad2-a713-1e1949e98451 2024-11-11T07:03:15,731 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/.tmp/hbase.id 2024-11-11T07:03:15,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36333 is added to blk_1073741826_1002 (size=42) 2024-11-11T07:03:15,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40533 is added to blk_1073741826_1002 (size=42) 2024-11-11T07:03:15,737 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/.tmp/hbase.id]:[hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/hbase.id] 2024-11-11T07:03:15,746 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:03:15,746 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T07:03:15,747 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-11T07:03:15,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33409-0x10105d694400001, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:03:15,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:03:15,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36333 is added to blk_1073741827_1003 (size=196) 2024-11-11T07:03:15,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40533 is added to blk_1073741827_1003 (size=196) 2024-11-11T07:03:15,754 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T07:03:15,755 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T07:03:15,755 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T07:03:15,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36333 is added to blk_1073741828_1004 (size=1189) 2024-11-11T07:03:15,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40533 is added to blk_1073741828_1004 (size=1189) 2024-11-11T07:03:15,762 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store 2024-11-11T07:03:15,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40533 is added to blk_1073741829_1005 (size=34) 2024-11-11T07:03:15,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36333 is added to blk_1073741829_1005 (size=34) 2024-11-11T07:03:15,767 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:03:15,767 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T07:03:15,767 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:03:15,768 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:03:15,768 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T07:03:15,768 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:03:15,768 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-11T07:03:15,768 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731308595767Disabling compacts and flushes for region at 1731308595767Disabling writes for close at 1731308595768 (+1 ms)Writing region close event to WAL at 1731308595768Closed at 1731308595768 2024-11-11T07:03:15,768 WARN [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store/.initializing 2024-11-11T07:03:15,768 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/WALs/f334e8b0fbdf,45229,1731308595645 2024-11-11T07:03:15,770 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f334e8b0fbdf%2C45229%2C1731308595645, suffix=, logDir=hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/WALs/f334e8b0fbdf,45229,1731308595645, archiveDir=hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/oldWALs, maxLogs=10 2024-11-11T07:03:15,771 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C45229%2C1731308595645.1731308595770 2024-11-11T07:03:15,775 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/WALs/f334e8b0fbdf,45229,1731308595645/f334e8b0fbdf%2C45229%2C1731308595645.1731308595770 2024-11-11T07:03:15,775 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39307:39307),(127.0.0.1/127.0.0.1:35969:35969)] 2024-11-11T07:03:15,776 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T07:03:15,776 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:03:15,776 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:03:15,776 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:03:15,777 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:03:15,778 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T07:03:15,778 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:03:15,778 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:03:15,778 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:03:15,779 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T07:03:15,779 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:03:15,779 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T07:03:15,780 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:03:15,780 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T07:03:15,780 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:03:15,781 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T07:03:15,781 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:03:15,782 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T07:03:15,782 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:03:15,782 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T07:03:15,782 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:03:15,783 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:03:15,783 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:03:15,784 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:03:15,784 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:03:15,784 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T07:03:15,785 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T07:03:15,786 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T07:03:15,787 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=772559, jitterRate=-0.01764105260372162}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T07:03:15,787 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731308595776Initializing all the Stores at 1731308595777 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308595777Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308595777Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308595777Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308595777Cleaning up temporary data from old regions at 1731308595784 (+7 ms)Region opened successfully at 1731308595787 (+3 ms) 2024-11-11T07:03:15,787 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T07:03:15,790 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cbcb9cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f334e8b0fbdf/172.17.0.2:0 2024-11-11T07:03:15,790 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T07:03:15,791 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T07:03:15,791 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T07:03:15,791 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T07:03:15,791 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T07:03:15,791 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T07:03:15,791 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T07:03:15,793 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T07:03:15,794 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T07:03:15,795 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T07:03:15,795 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T07:03:15,796 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T07:03:15,797 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T07:03:15,797 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T07:03:15,798 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T07:03:15,801 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T07:03:15,802 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T07:03:15,803 DEBUG 
[master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T07:03:15,804 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T07:03:15,805 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T07:03:15,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T07:03:15,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33409-0x10105d694400001, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T07:03:15,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:03:15,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33409-0x10105d694400001, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:03:15,808 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=f334e8b0fbdf,45229,1731308595645, sessionid=0x10105d694400000, setting cluster-up flag (Was=false) 2024-11-11T07:03:15,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:03:15,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33409-0x10105d694400001, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:03:15,819 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T07:03:15,820 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f334e8b0fbdf,45229,1731308595645 2024-11-11T07:03:15,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33409-0x10105d694400001, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:03:15,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:03:15,829 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T07:03:15,830 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f334e8b0fbdf,45229,1731308595645 2024-11-11T07:03:15,831 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T07:03:15,833 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T07:03:15,833 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T07:03:15,833 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T07:03:15,833 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: f334e8b0fbdf,45229,1731308595645 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T07:03:15,834 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T07:03:15,834 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T07:03:15,834 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T07:03:15,834 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=5, maxPoolSize=5 2024-11-11T07:03:15,834 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/f334e8b0fbdf:0, corePoolSize=10, maxPoolSize=10 2024-11-11T07:03:15,835 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:03:15,835 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=2, maxPoolSize=2 2024-11-11T07:03:15,835 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/f334e8b0fbdf:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T07:03:15,836 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T07:03:15,836 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T07:03:15,837 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:03:15,837 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T07:03:15,843 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731308625843 2024-11-11T07:03:15,843 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T07:03:15,843 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T07:03:15,843 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T07:03:15,843 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T07:03:15,843 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T07:03:15,843 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T07:03:15,843 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T07:03:15,844 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T07:03:15,844 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T07:03:15,844 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T07:03:15,844 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T07:03:15,844 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T07:03:15,845 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.large.0-1731308595844,5,FailOnTimeoutGroup] 2024-11-11T07:03:15,846 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.small.0-1731308595845,5,FailOnTimeoutGroup] 2024-11-11T07:03:15,846 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T07:03:15,846 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T07:03:15,846 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T07:03:15,846 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-11T07:03:15,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36333 is added to blk_1073741831_1007 (size=1321) 2024-11-11T07:03:15,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40533 is added to blk_1073741831_1007 (size=1321) 2024-11-11T07:03:15,851 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T07:03:15,851 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46 2024-11-11T07:03:15,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36333 is added to blk_1073741832_1008 (size=32) 2024-11-11T07:03:15,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40533 is added to blk_1073741832_1008 (size=32) 2024-11-11T07:03:15,859 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:03:15,860 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T07:03:15,861 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T07:03:15,861 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:03:15,861 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:03:15,861 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T07:03:15,862 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T07:03:15,862 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:03:15,862 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:03:15,863 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T07:03:15,863 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T07:03:15,863 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:03:15,864 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:03:15,864 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T07:03:15,865 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T07:03:15,865 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:03:15,865 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:03:15,865 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T07:03:15,866 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/data/hbase/meta/1588230740 2024-11-11T07:03:15,866 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/data/hbase/meta/1588230740 2024-11-11T07:03:15,867 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T07:03:15,867 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T07:03:15,868 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-11T07:03:15,869 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T07:03:15,870 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T07:03:15,871 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=876459, jitterRate=0.11447535455226898}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T07:03:15,872 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731308595859Initializing all the Stores at 1731308595859Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308595860 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308595860Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308595860Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308595860Cleaning up temporary data from old regions at 1731308595867 (+7 ms)Region opened successfully at 1731308595872 (+5 ms) 2024-11-11T07:03:15,872 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T07:03:15,872 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T07:03:15,872 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T07:03:15,872 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T07:03:15,872 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T07:03:15,872 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T07:03:15,872 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731308595872Disabling compacts and flushes for region at 1731308595872Disabling writes for close at 1731308595872Writing region close 
event to WAL at 1731308595872Closed at 1731308595872 2024-11-11T07:03:15,873 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T07:03:15,873 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T07:03:15,873 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T07:03:15,874 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T07:03:15,875 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T07:03:15,910 INFO [RS:0;f334e8b0fbdf:33409 {}] regionserver.HRegionServer(746): ClusterId : 9d807fc8-4382-4ad2-a713-1e1949e98451 2024-11-11T07:03:15,910 DEBUG [RS:0;f334e8b0fbdf:33409 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T07:03:15,912 DEBUG [RS:0;f334e8b0fbdf:33409 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T07:03:15,912 DEBUG [RS:0;f334e8b0fbdf:33409 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T07:03:15,913 DEBUG [RS:0;f334e8b0fbdf:33409 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T07:03:15,914 DEBUG [RS:0;f334e8b0fbdf:33409 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@563c24ca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f334e8b0fbdf/172.17.0.2:0 2024-11-11T07:03:15,925 DEBUG [RS:0;f334e8b0fbdf:33409 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;f334e8b0fbdf:33409 2024-11-11T07:03:15,925 INFO [RS:0;f334e8b0fbdf:33409 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T07:03:15,925 INFO [RS:0;f334e8b0fbdf:33409 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T07:03:15,925 DEBUG [RS:0;f334e8b0fbdf:33409 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-11T07:03:15,926 INFO [RS:0;f334e8b0fbdf:33409 {}] regionserver.HRegionServer(2659): reportForDuty to master=f334e8b0fbdf,45229,1731308595645 with port=33409, startcode=1731308595697 2024-11-11T07:03:15,926 DEBUG [RS:0;f334e8b0fbdf:33409 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T07:03:15,928 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34299, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T07:03:15,929 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45229 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f334e8b0fbdf,33409,1731308595697 2024-11-11T07:03:15,929 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45229 {}] master.ServerManager(517): Registering regionserver=f334e8b0fbdf,33409,1731308595697 2024-11-11T07:03:15,930 DEBUG [RS:0;f334e8b0fbdf:33409 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46 2024-11-11T07:03:15,930 DEBUG [RS:0;f334e8b0fbdf:33409 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33957 2024-11-11T07:03:15,930 DEBUG [RS:0;f334e8b0fbdf:33409 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T07:03:15,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T07:03:15,934 DEBUG [RS:0;f334e8b0fbdf:33409 {}] zookeeper.ZKUtil(111): regionserver:33409-0x10105d694400001, quorum=127.0.0.1:60298, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f334e8b0fbdf,33409,1731308595697 2024-11-11T07:03:15,934 WARN [RS:0;f334e8b0fbdf:33409 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T07:03:15,934 INFO [RS:0;f334e8b0fbdf:33409 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T07:03:15,934 DEBUG [RS:0;f334e8b0fbdf:33409 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/WALs/f334e8b0fbdf,33409,1731308595697 2024-11-11T07:03:15,934 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f334e8b0fbdf,33409,1731308595697] 2024-11-11T07:03:15,937 INFO [RS:0;f334e8b0fbdf:33409 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T07:03:15,938 INFO [RS:0;f334e8b0fbdf:33409 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T07:03:15,939 INFO [RS:0;f334e8b0fbdf:33409 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T07:03:15,939 INFO [RS:0;f334e8b0fbdf:33409 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-11T07:03:15,939 INFO [RS:0;f334e8b0fbdf:33409 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T07:03:15,939 INFO [RS:0;f334e8b0fbdf:33409 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T07:03:15,940 INFO [RS:0;f334e8b0fbdf:33409 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T07:03:15,940 DEBUG [RS:0;f334e8b0fbdf:33409 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:03:15,940 DEBUG [RS:0;f334e8b0fbdf:33409 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:03:15,940 DEBUG [RS:0;f334e8b0fbdf:33409 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:03:15,940 DEBUG [RS:0;f334e8b0fbdf:33409 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:03:15,940 DEBUG [RS:0;f334e8b0fbdf:33409 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:03:15,940 DEBUG [RS:0;f334e8b0fbdf:33409 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f334e8b0fbdf:0, corePoolSize=2, maxPoolSize=2 2024-11-11T07:03:15,940 DEBUG [RS:0;f334e8b0fbdf:33409 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:03:15,940 DEBUG [RS:0;f334e8b0fbdf:33409 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:03:15,940 DEBUG [RS:0;f334e8b0fbdf:33409 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:03:15,940 DEBUG [RS:0;f334e8b0fbdf:33409 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:03:15,940 DEBUG [RS:0;f334e8b0fbdf:33409 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:03:15,940 DEBUG [RS:0;f334e8b0fbdf:33409 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f334e8b0fbdf:0, corePoolSize=1, maxPoolSize=1 2024-11-11T07:03:15,940 DEBUG [RS:0;f334e8b0fbdf:33409 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f334e8b0fbdf:0, corePoolSize=3, maxPoolSize=3 2024-11-11T07:03:15,940 DEBUG [RS:0;f334e8b0fbdf:33409 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f334e8b0fbdf:0, corePoolSize=3, maxPoolSize=3 2024-11-11T07:03:15,940 INFO [RS:0;f334e8b0fbdf:33409 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-11T07:03:15,940 INFO [RS:0;f334e8b0fbdf:33409 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T07:03:15,940 INFO [RS:0;f334e8b0fbdf:33409 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T07:03:15,940 INFO [RS:0;f334e8b0fbdf:33409 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T07:03:15,940 INFO [RS:0;f334e8b0fbdf:33409 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T07:03:15,940 INFO [RS:0;f334e8b0fbdf:33409 {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,33409,1731308595697-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T07:03:15,955 INFO [RS:0;f334e8b0fbdf:33409 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T07:03:15,955 INFO [RS:0;f334e8b0fbdf:33409 {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,33409,1731308595697-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T07:03:15,955 INFO [RS:0;f334e8b0fbdf:33409 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:03:15,955 INFO [RS:0;f334e8b0fbdf:33409 {}] regionserver.Replication(171): f334e8b0fbdf,33409,1731308595697 started 2024-11-11T07:03:15,968 INFO [RS:0;f334e8b0fbdf:33409 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:03:15,969 INFO [RS:0;f334e8b0fbdf:33409 {}] regionserver.HRegionServer(1482): Serving as f334e8b0fbdf,33409,1731308595697, RpcServer on f334e8b0fbdf/172.17.0.2:33409, sessionid=0x10105d694400001 2024-11-11T07:03:15,969 DEBUG [RS:0;f334e8b0fbdf:33409 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T07:03:15,969 DEBUG [RS:0;f334e8b0fbdf:33409 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f334e8b0fbdf,33409,1731308595697 2024-11-11T07:03:15,969 DEBUG [RS:0;f334e8b0fbdf:33409 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f334e8b0fbdf,33409,1731308595697' 2024-11-11T07:03:15,969 DEBUG [RS:0;f334e8b0fbdf:33409 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T07:03:15,969 DEBUG [RS:0;f334e8b0fbdf:33409 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T07:03:15,970 DEBUG [RS:0;f334e8b0fbdf:33409 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T07:03:15,970 DEBUG [RS:0;f334e8b0fbdf:33409 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T07:03:15,970 DEBUG [RS:0;f334e8b0fbdf:33409 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager f334e8b0fbdf,33409,1731308595697 2024-11-11T07:03:15,970 DEBUG [RS:0;f334e8b0fbdf:33409 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f334e8b0fbdf,33409,1731308595697' 2024-11-11T07:03:15,970 DEBUG [RS:0;f334e8b0fbdf:33409 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T07:03:15,970 DEBUG 
[RS:0;f334e8b0fbdf:33409 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T07:03:15,970 DEBUG [RS:0;f334e8b0fbdf:33409 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T07:03:15,970 INFO [RS:0;f334e8b0fbdf:33409 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T07:03:15,971 INFO [RS:0;f334e8b0fbdf:33409 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T07:03:16,026 WARN [f334e8b0fbdf:45229 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-11T07:03:16,072 INFO [RS:0;f334e8b0fbdf:33409 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f334e8b0fbdf%2C33409%2C1731308595697, suffix=, logDir=hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/WALs/f334e8b0fbdf,33409,1731308595697, archiveDir=hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/oldWALs, maxLogs=32 2024-11-11T07:03:16,073 INFO [RS:0;f334e8b0fbdf:33409 {}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C33409%2C1731308595697.1731308596073 2024-11-11T07:03:16,078 INFO [RS:0;f334e8b0fbdf:33409 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/WALs/f334e8b0fbdf,33409,1731308595697/f334e8b0fbdf%2C33409%2C1731308595697.1731308596073 2024-11-11T07:03:16,079 DEBUG [RS:0;f334e8b0fbdf:33409 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39307:39307),(127.0.0.1/127.0.0.1:35969:35969)] 2024-11-11T07:03:16,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.1731308420824 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:16,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,42847,1731308421347/f334e8b0fbdf%2C42847%2C1731308421347.1731308421530 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T07:03:16,276 DEBUG [f334e8b0fbdf:45229 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T07:03:16,276 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=f334e8b0fbdf,33409,1731308595697 2024-11-11T07:03:16,278 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f334e8b0fbdf,33409,1731308595697, state=OPENING 2024-11-11T07:03:16,280 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T07:03:16,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:03:16,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33409-0x10105d694400001, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:03:16,282 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T07:03:16,282 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T07:03:16,282 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T07:03:16,282 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=f334e8b0fbdf,33409,1731308595697}] 2024-11-11T07:03:16,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41533/user/jenkins/test-data/4d91908d-7607-860b-fb87-4b86fb1c3842/WALs/f334e8b0fbdf,45321,1731308420401/f334e8b0fbdf%2C45321%2C1731308420401.meta.1731308421182.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T07:03:16,435 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T07:03:16,437 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60037, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T07:03:16,440 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T07:03:16,440 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T07:03:16,442 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f334e8b0fbdf%2C33409%2C1731308595697.meta, suffix=.meta, logDir=hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/WALs/f334e8b0fbdf,33409,1731308595697, archiveDir=hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/oldWALs, maxLogs=32 2024-11-11T07:03:16,442 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor f334e8b0fbdf%2C33409%2C1731308595697.meta.1731308596442.meta 2024-11-11T07:03:16,449 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/WALs/f334e8b0fbdf,33409,1731308595697/f334e8b0fbdf%2C33409%2C1731308595697.meta.1731308596442.meta 2024-11-11T07:03:16,452 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35969:35969),(127.0.0.1/127.0.0.1:39307:39307)] 2024-11-11T07:03:16,453 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T07:03:16,454 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T07:03:16,454 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T07:03:16,454 
INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-11T07:03:16,454 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T07:03:16,454 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T07:03:16,454 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T07:03:16,454 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T07:03:16,455 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T07:03:16,456 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T07:03:16,456 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:03:16,456 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:03:16,456 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T07:03:16,457 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T07:03:16,457 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:03:16,457 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:03:16,457 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T07:03:16,458 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T07:03:16,458 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:03:16,458 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:03:16,458 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T07:03:16,459 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T07:03:16,459 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T07:03:16,459 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T07:03:16,459 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T07:03:16,460 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/data/hbase/meta/1588230740 2024-11-11T07:03:16,460 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/data/hbase/meta/1588230740 2024-11-11T07:03:16,461 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T07:03:16,461 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T07:03:16,462 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T07:03:16,463 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T07:03:16,463 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=807646, jitterRate=0.026976212859153748}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T07:03:16,463 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T07:03:16,464 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731308596454Writing region info on filesystem at 1731308596454Initializing all the Stores at 1731308596455 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308596455Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308596455Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 
'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731308596455Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731308596455Cleaning up temporary data from old regions at 1731308596461 (+6 ms)Running coprocessor post-open hooks at 1731308596463 (+2 ms)Region opened successfully at 1731308596464 (+1 ms) 2024-11-11T07:03:16,465 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731308596435 2024-11-11T07:03:16,467 DEBUG [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T07:03:16,467 INFO [RS_OPEN_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T07:03:16,467 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=f334e8b0fbdf,33409,1731308595697 2024-11-11T07:03:16,468 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f334e8b0fbdf,33409,1731308595697, state=OPEN 2024-11-11T07:03:16,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T07:03:16,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33409-0x10105d694400001, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T07:03:16,474 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=f334e8b0fbdf,33409,1731308595697 2024-11-11T07:03:16,474 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T07:03:16,474 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T07:03:16,476 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T07:03:16,476 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=f334e8b0fbdf,33409,1731308595697 in 192 msec 2024-11-11T07:03:16,478 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T07:03:16,478 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 603 msec 2024-11-11T07:03:16,478 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T07:03:16,478 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T07:03:16,479 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T07:03:16,479 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f334e8b0fbdf,33409,1731308595697, seqNum=-1] 2024-11-11T07:03:16,480 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T07:03:16,481 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44195, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T07:03:16,486 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 653 msec 2024-11-11T07:03:16,486 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731308596486, completionTime=-1 2024-11-11T07:03:16,487 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T07:03:16,487 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-11T07:03:16,489 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-11T07:03:16,489 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731308656489 2024-11-11T07:03:16,489 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731308716489 2024-11-11T07:03:16,489 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-11T07:03:16,489 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,45229,1731308595645-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T07:03:16,489 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,45229,1731308595645-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:03:16,489 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,45229,1731308595645-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T07:03:16,489 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-f334e8b0fbdf:45229, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T07:03:16,489 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T07:03:16,490 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T07:03:16,491 DEBUG [master/f334e8b0fbdf:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T07:03:16,493 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.765sec 2024-11-11T07:03:16,493 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T07:03:16,493 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T07:03:16,493 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T07:03:16,494 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T07:03:16,494 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T07:03:16,494 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,45229,1731308595645-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T07:03:16,494 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,45229,1731308595645-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T07:03:16,496 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T07:03:16,496 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T07:03:16,496 INFO [master/f334e8b0fbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f334e8b0fbdf,45229,1731308595645-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T07:03:16,510 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ba1b0df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T07:03:16,510 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request f334e8b0fbdf,45229,-1 for getting cluster id 2024-11-11T07:03:16,510 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T07:03:16,511 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9d807fc8-4382-4ad2-a713-1e1949e98451' 2024-11-11T07:03:16,512 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T07:03:16,512 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9d807fc8-4382-4ad2-a713-1e1949e98451" 2024-11-11T07:03:16,512 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@780b7386, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T07:03:16,512 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f334e8b0fbdf,45229,-1] 2024-11-11T07:03:16,512 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T07:03:16,512 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:03:16,513 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35318, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T07:03:16,514 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72757a2f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T07:03:16,514 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T07:03:16,515 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f334e8b0fbdf,33409,1731308595697, seqNum=-1] 2024-11-11T07:03:16,515 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T07:03:16,516 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53792, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T07:03:16,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=f334e8b0fbdf,45229,1731308595645 2024-11-11T07:03:16,518 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T07:03:16,520 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T07:03:16,520 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T07:03:16,522 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/WALs/test.com,8080,1, archiveDir=hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/oldWALs, maxLogs=32 2024-11-11T07:03:16,522 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731308596522 2024-11-11T07:03:16,527 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/WALs/test.com,8080,1/test.com%2C8080%2C1.1731308596522 2024-11-11T07:03:16,528 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39307:39307),(127.0.0.1/127.0.0.1:35969:35969)] 2024-11-11T07:03:16,530 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731308596530 2024-11-11T07:03:16,535 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,535 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,536 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,536 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,536 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,536 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/WALs/test.com,8080,1/test.com%2C8080%2C1.1731308596522 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/WALs/test.com,8080,1/test.com%2C8080%2C1.1731308596530 2024-11-11T07:03:16,537 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35969:35969),(127.0.0.1/127.0.0.1:39307:39307)] 2024-11-11T07:03:16,537 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/WALs/test.com,8080,1/test.com%2C8080%2C1.1731308596522 is not closed yet, will try archiving it next time 2024-11-11T07:03:16,537 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36333 is added to blk_1073741835_1011 (size=93) 2024-11-11T07:03:16,537 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,537 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,537 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,538 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40533 is added to blk_1073741835_1011 (size=93) 2024-11-11T07:03:16,538 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/WALs/test.com,8080,1/test.com%2C8080%2C1.1731308596522 to hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/oldWALs/test.com%2C8080%2C1.1731308596522 2024-11-11T07:03:16,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40533 is added to blk_1073741836_1012 (size=93) 2024-11-11T07:03:16,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36333 is added to blk_1073741836_1012 (size=93) 2024-11-11T07:03:16,541 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/oldWALs 2024-11-11T07:03:16,541 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731308596530) 2024-11-11T07:03:16,541 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T07:03:16,541 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T07:03:16,541 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T07:03:16,541 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:03:16,541 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:03:16,542 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-11T07:03:16,542 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T07:03:16,542 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=313938093, stopped=false 2024-11-11T07:03:16,542 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=f334e8b0fbdf,45229,1731308595645 2024-11-11T07:03:16,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T07:03:16,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33409-0x10105d694400001, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T07:03:16,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33409-0x10105d694400001, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:03:16,543 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T07:03:16,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:03:16,544 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-11T07:03:16,544 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T07:03:16,544 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:03:16,544 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'f334e8b0fbdf,33409,1731308595697' ***** 2024-11-11T07:03:16,544 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T07:03:16,544 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33409-0x10105d694400001, quorum=127.0.0.1:60298, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:03:16,544 INFO [RS:0;f334e8b0fbdf:33409 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T07:03:16,544 INFO [RS:0;f334e8b0fbdf:33409 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T07:03:16,544 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T07:03:16,544 INFO [RS:0;f334e8b0fbdf:33409 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T07:03:16,544 INFO [RS:0;f334e8b0fbdf:33409 {}] regionserver.HRegionServer(959): stopping server f334e8b0fbdf,33409,1731308595697 2024-11-11T07:03:16,544 INFO [RS:0;f334e8b0fbdf:33409 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T07:03:16,544 INFO [RS:0;f334e8b0fbdf:33409 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;f334e8b0fbdf:33409. 2024-11-11T07:03:16,544 DEBUG [RS:0;f334e8b0fbdf:33409 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T07:03:16,544 DEBUG [RS:0;f334e8b0fbdf:33409 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:03:16,545 INFO [RS:0;f334e8b0fbdf:33409 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T07:03:16,545 INFO [RS:0;f334e8b0fbdf:33409 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T07:03:16,545 INFO [RS:0;f334e8b0fbdf:33409 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-11T07:03:16,545 INFO [RS:0;f334e8b0fbdf:33409 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T07:03:16,545 INFO [RS:0;f334e8b0fbdf:33409 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-11T07:03:16,545 DEBUG [RS:0;f334e8b0fbdf:33409 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-11T07:03:16,545 DEBUG [RS:0;f334e8b0fbdf:33409 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T07:03:16,545 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T07:03:16,545 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T07:03:16,545 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T07:03:16,545 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T07:03:16,545 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T07:03:16,545 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-11T07:03:16,548 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T07:03:16,561 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/data/hbase/meta/1588230740/.tmp/ns/e908dd639d044d37bd4110970f59533f is 43, key is default/ns:d/1731308596482/Put/seqid=0 2024-11-11T07:03:16,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40533 is added to blk_1073741837_1013 (size=5153) 2024-11-11T07:03:16,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36333 is added to blk_1073741837_1013 (size=5153) 2024-11-11T07:03:16,566 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/data/hbase/meta/1588230740/.tmp/ns/e908dd639d044d37bd4110970f59533f 2024-11-11T07:03:16,571 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/data/hbase/meta/1588230740/.tmp/ns/e908dd639d044d37bd4110970f59533f as hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/data/hbase/meta/1588230740/ns/e908dd639d044d37bd4110970f59533f 2024-11-11T07:03:16,575 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/data/hbase/meta/1588230740/ns/e908dd639d044d37bd4110970f59533f, entries=2, sequenceid=6, filesize=5.0 K 2024-11-11T07:03:16,575 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false 2024-11-11T07:03:16,579 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-11T07:03:16,579 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T07:03:16,579 INFO [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T07:03:16,579 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731308596545Running coprocessor pre-close hooks at 1731308596545Disabling compacts and flushes for region at 1731308596545Disabling writes for close at 1731308596545Obtaining lock to block concurrent updates at 1731308596545Preparing flush snapshotting stores in 1588230740 at 1731308596545Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731308596546 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731308596546Flushing 1588230740/ns: creating writer at 1731308596546Flushing 1588230740/ns: appending metadata at 1731308596561 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731308596561Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f60afd: reopening flushed file at 1731308596570 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false at 1731308596575 (+5 ms)Writing region close event to WAL at 1731308596576 (+1 ms)Running coprocessor post-close hooks at 1731308596579 (+3 ms)Closed at 1731308596579 2024-11-11T07:03:16,579 DEBUG [RS_CLOSE_META-regionserver/f334e8b0fbdf:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T07:03:16,745 INFO [RS:0;f334e8b0fbdf:33409 {}] regionserver.HRegionServer(976): stopping server f334e8b0fbdf,33409,1731308595697; all regions closed. 
2024-11-11T07:03:16,746 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,746 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,746 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,746 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,746 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40533 is added to blk_1073741834_1010 (size=1152) 2024-11-11T07:03:16,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36333 is added to blk_1073741834_1010 (size=1152) 2024-11-11T07:03:16,750 DEBUG [RS:0;f334e8b0fbdf:33409 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/oldWALs 2024-11-11T07:03:16,750 INFO [RS:0;f334e8b0fbdf:33409 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f334e8b0fbdf%2C33409%2C1731308595697.meta:.meta(num 1731308596442) 2024-11-11T07:03:16,750 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,750 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,750 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,751 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,751 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36333 is added to blk_1073741833_1009 (size=93) 2024-11-11T07:03:16,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40533 is added to blk_1073741833_1009 (size=93) 2024-11-11T07:03:16,755 DEBUG [RS:0;f334e8b0fbdf:33409 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/oldWALs 2024-11-11T07:03:16,755 INFO [RS:0;f334e8b0fbdf:33409 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f334e8b0fbdf%2C33409%2C1731308595697:(num 1731308596073) 2024-11-11T07:03:16,755 DEBUG [RS:0;f334e8b0fbdf:33409 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T07:03:16,755 INFO [RS:0;f334e8b0fbdf:33409 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T07:03:16,755 INFO [RS:0;f334e8b0fbdf:33409 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T07:03:16,755 INFO [RS:0;f334e8b0fbdf:33409 {}] hbase.ChoreService(370): Chore service for: regionserver/f334e8b0fbdf:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T07:03:16,755 INFO [RS:0;f334e8b0fbdf:33409 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T07:03:16,755 INFO [regionserver/f334e8b0fbdf:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T07:03:16,755 INFO [RS:0;f334e8b0fbdf:33409 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33409 2024-11-11T07:03:16,757 INFO [RS:0;f334e8b0fbdf:33409 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T07:03:16,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33409-0x10105d694400001, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f334e8b0fbdf,33409,1731308595697 2024-11-11T07:03:16,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T07:03:16,760 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f334e8b0fbdf,33409,1731308595697] 2024-11-11T07:03:16,761 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f334e8b0fbdf,33409,1731308595697 already deleted, retry=false 2024-11-11T07:03:16,761 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f334e8b0fbdf,33409,1731308595697 expired; onlineServers=0 2024-11-11T07:03:16,761 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'f334e8b0fbdf,45229,1731308595645' ***** 2024-11-11T07:03:16,761 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T07:03:16,761 INFO [M:0;f334e8b0fbdf:45229 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T07:03:16,761 INFO [M:0;f334e8b0fbdf:45229 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T07:03:16,762 DEBUG [M:0;f334e8b0fbdf:45229 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T07:03:16,762 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-11T07:03:16,762 DEBUG [M:0;f334e8b0fbdf:45229 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T07:03:16,762 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.small.0-1731308595845 {}] cleaner.HFileCleaner(306): Exit Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.small.0-1731308595845,5,FailOnTimeoutGroup] 2024-11-11T07:03:16,762 DEBUG [master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.large.0-1731308595844 {}] cleaner.HFileCleaner(306): Exit Thread[master/f334e8b0fbdf:0:becomeActiveMaster-HFileCleaner.large.0-1731308595844,5,FailOnTimeoutGroup] 2024-11-11T07:03:16,762 INFO [M:0;f334e8b0fbdf:45229 {}] hbase.ChoreService(370): Chore service for: master/f334e8b0fbdf:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T07:03:16,762 INFO [M:0;f334e8b0fbdf:45229 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T07:03:16,762 DEBUG [M:0;f334e8b0fbdf:45229 {}] master.HMaster(1795): Stopping service threads 2024-11-11T07:03:16,762 INFO [M:0;f334e8b0fbdf:45229 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T07:03:16,762 INFO [M:0;f334e8b0fbdf:45229 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T07:03:16,762 INFO [M:0;f334e8b0fbdf:45229 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T07:03:16,762 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T07:03:16,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T07:03:16,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T07:03:16,763 DEBUG [M:0;f334e8b0fbdf:45229 {}] zookeeper.ZKUtil(347): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T07:03:16,763 WARN [M:0;f334e8b0fbdf:45229 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T07:03:16,764 INFO [M:0;f334e8b0fbdf:45229 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/.lastflushedseqids 2024-11-11T07:03:16,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36333 is added to blk_1073741838_1014 (size=99) 2024-11-11T07:03:16,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40533 is added to blk_1073741838_1014 (size=99) 2024-11-11T07:03:16,769 INFO [M:0;f334e8b0fbdf:45229 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T07:03:16,769 INFO [M:0;f334e8b0fbdf:45229 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T07:03:16,769 DEBUG [M:0;f334e8b0fbdf:45229 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T07:03:16,769 INFO [M:0;f334e8b0fbdf:45229 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:03:16,769 DEBUG [M:0;f334e8b0fbdf:45229 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:03:16,769 DEBUG [M:0;f334e8b0fbdf:45229 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T07:03:16,769 DEBUG [M:0;f334e8b0fbdf:45229 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:03:16,769 INFO [M:0;f334e8b0fbdf:45229 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-11T07:03:16,785 DEBUG [M:0;f334e8b0fbdf:45229 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e09524ba5d3041c8bd2edbd703b2d9fe is 82, key is hbase:meta,,1/info:regioninfo/1731308596467/Put/seqid=0 2024-11-11T07:03:16,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40533 is added to blk_1073741839_1015 (size=5672) 2024-11-11T07:03:16,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36333 is added to blk_1073741839_1015 (size=5672) 2024-11-11T07:03:16,790 INFO [M:0;f334e8b0fbdf:45229 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e09524ba5d3041c8bd2edbd703b2d9fe 2024-11-11T07:03:16,808 DEBUG [M:0;f334e8b0fbdf:45229 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d513de73d5834bcabe57d62df36f4bc8 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731308596486/Put/seqid=0 2024-11-11T07:03:16,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40533 is added to blk_1073741840_1016 (size=5275) 2024-11-11T07:03:16,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36333 is added to blk_1073741840_1016 (size=5275) 2024-11-11T07:03:16,812 INFO [M:0;f334e8b0fbdf:45229 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d513de73d5834bcabe57d62df36f4bc8 2024-11-11T07:03:16,830 DEBUG [M:0;f334e8b0fbdf:45229 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/266dc5409fd84a2f9ee2889721ecb0bb is 69, key is f334e8b0fbdf,33409,1731308595697/rs:state/1731308595929/Put/seqid=0 2024-11-11T07:03:16,834 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40533 is added to blk_1073741841_1017 (size=5156) 2024-11-11T07:03:16,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36333 is added to blk_1073741841_1017 (size=5156) 2024-11-11T07:03:16,835 INFO [M:0;f334e8b0fbdf:45229 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/266dc5409fd84a2f9ee2889721ecb0bb 2024-11-11T07:03:16,852 DEBUG [M:0;f334e8b0fbdf:45229 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0c13f99f2c824a47b400c45c4953d0b9 is 52, key is load_balancer_on/state:d/1731308596519/Put/seqid=0 2024-11-11T07:03:16,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36333 is added to blk_1073741842_1018 (size=5056) 2024-11-11T07:03:16,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40533 is added to blk_1073741842_1018 (size=5056) 2024-11-11T07:03:16,857 INFO [M:0;f334e8b0fbdf:45229 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0c13f99f2c824a47b400c45c4953d0b9 2024-11-11T07:03:16,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33409-0x10105d694400001, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:03:16,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33409-0x10105d694400001, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:03:16,860 INFO [RS:0;f334e8b0fbdf:33409 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T07:03:16,860 INFO [RS:0;f334e8b0fbdf:33409 {}] regionserver.HRegionServer(1031): Exiting; stopping=f334e8b0fbdf,33409,1731308595697; zookeeper connection closed. 
2024-11-11T07:03:16,860 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@35889feb {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@35889feb 2024-11-11T07:03:16,861 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-11T07:03:16,861 DEBUG [M:0;f334e8b0fbdf:45229 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e09524ba5d3041c8bd2edbd703b2d9fe as hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e09524ba5d3041c8bd2edbd703b2d9fe 2024-11-11T07:03:16,865 INFO [M:0;f334e8b0fbdf:45229 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e09524ba5d3041c8bd2edbd703b2d9fe, entries=8, sequenceid=29, filesize=5.5 K 2024-11-11T07:03:16,865 DEBUG [M:0;f334e8b0fbdf:45229 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d513de73d5834bcabe57d62df36f4bc8 as hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d513de73d5834bcabe57d62df36f4bc8 2024-11-11T07:03:16,869 INFO [M:0;f334e8b0fbdf:45229 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d513de73d5834bcabe57d62df36f4bc8, entries=3, sequenceid=29, filesize=5.2 K 2024-11-11T07:03:16,869 DEBUG [M:0;f334e8b0fbdf:45229 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/266dc5409fd84a2f9ee2889721ecb0bb as hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/266dc5409fd84a2f9ee2889721ecb0bb 2024-11-11T07:03:16,873 INFO [M:0;f334e8b0fbdf:45229 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/266dc5409fd84a2f9ee2889721ecb0bb, entries=1, sequenceid=29, filesize=5.0 K 2024-11-11T07:03:16,873 DEBUG [M:0;f334e8b0fbdf:45229 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0c13f99f2c824a47b400c45c4953d0b9 as hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0c13f99f2c824a47b400c45c4953d0b9 2024-11-11T07:03:16,877 INFO [M:0;f334e8b0fbdf:45229 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33957/user/jenkins/test-data/f48653e5-f3de-cbff-ebfc-af6fa1dd4d46/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0c13f99f2c824a47b400c45c4953d0b9, entries=1, sequenceid=29, filesize=4.9 K 2024-11-11T07:03:16,878 INFO [M:0;f334e8b0fbdf:45229 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 109ms, sequenceid=29, compaction requested=false 2024-11-11T07:03:16,884 INFO [M:0;f334e8b0fbdf:45229 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T07:03:16,884 DEBUG [M:0;f334e8b0fbdf:45229 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731308596769Disabling compacts and flushes for region at 1731308596769Disabling writes for close at 1731308596769Obtaining lock to block concurrent updates at 1731308596769Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731308596769Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731308596770 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731308596770Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731308596770Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731308596785 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731308596785Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731308596794 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731308596807 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731308596807Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731308596816 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731308596830 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731308596830Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731308596838 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731308596852 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731308596852Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20aa66ae: reopening flushed file at 1731308596860 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@662a4012: reopening flushed file at 1731308596865 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1aaa210a: reopening flushed file at 1731308596869 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38a30e1b: reopening flushed file at 1731308596873 (+4 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 109ms, sequenceid=29, compaction requested=false at 1731308596878 (+5 ms)Writing region close event to WAL at 1731308596884 (+6 ms)Closed at 1731308596884 2024-11-11T07:03:16,885 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,885 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,885 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,885 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,885 INFO 
[sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T07:03:16,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36333 is added to blk_1073741830_1006 (size=10311) 2024-11-11T07:03:16,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40533 is added to blk_1073741830_1006 (size=10311) 2024-11-11T07:03:16,887 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T07:03:16,887 INFO [M:0;f334e8b0fbdf:45229 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-11T07:03:16,887 INFO [M:0;f334e8b0fbdf:45229 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45229 2024-11-11T07:03:16,888 INFO [M:0;f334e8b0fbdf:45229 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T07:03:16,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:03:16,990 INFO [M:0;f334e8b0fbdf:45229 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T07:03:16,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45229-0x10105d694400000, quorum=127.0.0.1:60298, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T07:03:16,992 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2d0ad989{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:03:16,992 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7e58a9be{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:03:16,992 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:03:16,993 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2191d18b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:03:16,993 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d7e0513{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/hadoop.log.dir/,STOPPED} 2024-11-11T07:03:16,994 WARN [BP-595726902-172.17.0.2-1731308594980 heartbeating to localhost/127.0.0.1:33957 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T07:03:16,994 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T07:03:16,994 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T07:03:16,994 WARN [BP-595726902-172.17.0.2-1731308594980 heartbeating to localhost/127.0.0.1:33957 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-595726902-172.17.0.2-1731308594980 (Datanode Uuid 0865c4b1-87f9-440d-8c18-ff87f23d0042) service to localhost/127.0.0.1:33957 2024-11-11T07:03:16,994 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/cluster_f813a044-6a12-a3ec-2096-d555328d5c86/data/data3/current/BP-595726902-172.17.0.2-1731308594980 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:03:16,995 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/cluster_f813a044-6a12-a3ec-2096-d555328d5c86/data/data4/current/BP-595726902-172.17.0.2-1731308594980 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:03:16,995 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T07:03:16,997 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@a6bcfcb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T07:03:16,997 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@60c41d21{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:03:16,997 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:03:16,997 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@665d17bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:03:16,997 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a21ffab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/hadoop.log.dir/,STOPPED} 2024-11-11T07:03:16,998 WARN [BP-595726902-172.17.0.2-1731308594980 heartbeating to localhost/127.0.0.1:33957 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T07:03:16,998 WARN [BP-595726902-172.17.0.2-1731308594980 heartbeating to localhost/127.0.0.1:33957 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-595726902-172.17.0.2-1731308594980 (Datanode Uuid 461b5cf0-c9a8-4738-a28e-2498344ac4aa) service to localhost/127.0.0.1:33957 2024-11-11T07:03:16,998 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T07:03:16,998 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T07:03:16,999 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/cluster_f813a044-6a12-a3ec-2096-d555328d5c86/data/data1/current/BP-595726902-172.17.0.2-1731308594980 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:03:16,999 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/cluster_f813a044-6a12-a3ec-2096-d555328d5c86/data/data2/current/BP-595726902-172.17.0.2-1731308594980 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T07:03:16,999 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T07:03:17,005 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7c411d10{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T07:03:17,005 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a06f288{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T07:03:17,005 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T07:03:17,005 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a248f23{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T07:03:17,005 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3317cc02{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1277c8b0-adaa-7bdf-5fd1-ce42c9c2c000/hadoop.log.dir/,STOPPED} 2024-11-11T07:03:17,011 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T07:03:17,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T07:03:17,036 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=270 (was 231) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33957 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33957 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: globalEventExecutor-1-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:33957 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33957 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33957 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33957 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33957 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33957 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=539 (was 516) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=23 (was 25), ProcessCount=11 (was 11), AvailableMemoryMB=8178 (was 8196)